2024-11-26 10:33:00,792 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-11-26 10:33:00,805 main DEBUG Took 0.010690 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-26 10:33:00,805 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-26 10:33:00,805 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-26 10:33:00,806 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-26 10:33:00,808 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-26 10:33:00,815 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-26 10:33:00,827 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-26 10:33:00,828 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-26 10:33:00,829 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-26 10:33:00,829 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-26 10:33:00,830 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-26 10:33:00,830 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-26 10:33:00,831 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-26 10:33:00,831 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-26 10:33:00,831 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-26 10:33:00,832 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-26 10:33:00,832 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-26 10:33:00,833 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-26 10:33:00,833 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-26 10:33:00,834 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-26 10:33:00,834 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-26 10:33:00,834 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-26 10:33:00,835 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-26 10:33:00,835 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-26 10:33:00,835 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-26 10:33:00,836 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-26 10:33:00,836 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-26 10:33:00,836 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-26 10:33:00,837 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-26 10:33:00,837 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-26 10:33:00,838 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-26 10:33:00,838 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-26 10:33:00,839 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-26 10:33:00,840 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-26 10:33:00,842 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-26 10:33:00,843 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-26 10:33:00,844 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-26 10:33:00,844 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-26 10:33:00,851 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-26 10:33:00,854 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-26 10:33:00,855 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-26 10:33:00,856 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-26 10:33:00,856 main DEBUG createAppenders(={Console}) 2024-11-26 10:33:00,857 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 initialized 2024-11-26 10:33:00,857 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-11-26 10:33:00,857 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 OK. 2024-11-26 10:33:00,858 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-26 10:33:00,858 main DEBUG OutputStream closed 2024-11-26 10:33:00,858 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-26 10:33:00,858 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-26 10:33:00,859 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@54e1c68b OK 2024-11-26 10:33:00,930 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-26 10:33:00,932 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-26 10:33:00,933 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-26 10:33:00,934 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-26 10:33:00,934 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-26 10:33:00,934 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-26 10:33:00,935 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-26 10:33:00,935 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-26 10:33:00,935 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-26 10:33:00,935 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-26 10:33:00,936 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-26 10:33:00,936 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-26 10:33:00,936 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-26 10:33:00,937 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-26 10:33:00,937 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-26 10:33:00,937 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-26 10:33:00,937 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-26 10:33:00,938 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-26 10:33:00,940 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-26 10:33:00,941 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@7dda48d9) with optional ClassLoader: null 2024-11-26 10:33:00,941 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-26 10:33:00,942 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@7dda48d9] started OK. 2024-11-26T10:33:01,157 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7a76f559-d5d5-e8b7-800c-5c4f29b03122 2024-11-26 10:33:01,160 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-26 10:33:01,160 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
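The DEBUG trace above shows Log4j 2.17.2 assembling its configuration from the log4j2.properties bundled in hbase-logging-2.7.0-SNAPSHOT-tests.jar: one HBaseTestAppender named Console writing to SYSTEM_ERR with an ISO8601 pattern layout, a set of per-package logger levels, and a root logger at INFO. Purely for orientation, a minimal properties sketch consistent with that trace (an assumed illustration in standard log4j2 properties syntax, not the actual file; the logger list is abbreviated) might look like:

    appender.console.type = HBaseTestAppender
    appender.console.name = Console
    appender.console.target = SYSTEM_ERR
    appender.console.maxSize = 1G
    appender.console.layout.type = PatternLayout
    appender.console.layout.pattern = %d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n

    logger.zookeeper.name = org.apache.zookeeper
    logger.zookeeper.level = ERROR
    logger.hbase.name = org.apache.hadoop.hbase
    logger.hbase.level = DEBUG

    rootLogger = INFO,Console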
2024-11-26T10:33:01,168 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.TestAcidGuaranteesWithAdaptivePolicy timeout: 13 mins 2024-11-26T10:33:01,188 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-26T10:33:01,191 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7a76f559-d5d5-e8b7-800c-5c4f29b03122/cluster_8e76cc6b-74d5-cb1e-040e-96e811924b73, deleteOnExit=true 2024-11-26T10:33:01,191 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-11-26T10:33:01,192 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7a76f559-d5d5-e8b7-800c-5c4f29b03122/test.cache.data in system properties and HBase conf 2024-11-26T10:33:01,192 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7a76f559-d5d5-e8b7-800c-5c4f29b03122/hadoop.tmp.dir in system properties and HBase conf 2024-11-26T10:33:01,193 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7a76f559-d5d5-e8b7-800c-5c4f29b03122/hadoop.log.dir in system properties and HBase conf 2024-11-26T10:33:01,194 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7a76f559-d5d5-e8b7-800c-5c4f29b03122/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-26T10:33:01,194 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7a76f559-d5d5-e8b7-800c-5c4f29b03122/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-26T10:33:01,194 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-11-26T10:33:01,279 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-26T10:33:01,364 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-26T10:33:01,368 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7a76f559-d5d5-e8b7-800c-5c4f29b03122/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-26T10:33:01,369 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7a76f559-d5d5-e8b7-800c-5c4f29b03122/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-26T10:33:01,369 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7a76f559-d5d5-e8b7-800c-5c4f29b03122/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-26T10:33:01,370 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7a76f559-d5d5-e8b7-800c-5c4f29b03122/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-26T10:33:01,370 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7a76f559-d5d5-e8b7-800c-5c4f29b03122/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-26T10:33:01,370 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7a76f559-d5d5-e8b7-800c-5c4f29b03122/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-26T10:33:01,371 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7a76f559-d5d5-e8b7-800c-5c4f29b03122/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-26T10:33:01,371 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7a76f559-d5d5-e8b7-800c-5c4f29b03122/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-26T10:33:01,372 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7a76f559-d5d5-e8b7-800c-5c4f29b03122/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-26T10:33:01,372 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7a76f559-d5d5-e8b7-800c-5c4f29b03122/nfs.dump.dir in system properties and HBase conf 2024-11-26T10:33:01,372 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7a76f559-d5d5-e8b7-800c-5c4f29b03122/java.io.tmpdir in system properties and HBase conf 2024-11-26T10:33:01,373 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7a76f559-d5d5-e8b7-800c-5c4f29b03122/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-26T10:33:01,373 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7a76f559-d5d5-e8b7-800c-5c4f29b03122/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-26T10:33:01,373 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7a76f559-d5d5-e8b7-800c-5c4f29b03122/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-26T10:33:02,381 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-26T10:33:02,447 INFO [Time-limited test {}] log.Log(170): Logging initialized @2305ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-26T10:33:02,505 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-26T10:33:02,565 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-26T10:33:02,586 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-26T10:33:02,586 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-26T10:33:02,588 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-26T10:33:02,599 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-26T10:33:02,601 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7a76f559-d5d5-e8b7-800c-5c4f29b03122/hadoop.log.dir/,AVAILABLE} 2024-11-26T10:33:02,602 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-26T10:33:02,779 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@b03fcff{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7a76f559-d5d5-e8b7-800c-5c4f29b03122/java.io.tmpdir/jetty-localhost-45781-hadoop-hdfs-3_4_1-tests_jar-_-any-1334427959577103085/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-26T10:33:02,787 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:45781} 2024-11-26T10:33:02,787 INFO [Time-limited test {}] server.Server(415): Started @2646ms 2024-11-26T10:33:03,268 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-26T10:33:03,277 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-26T10:33:03,278 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-26T10:33:03,279 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-26T10:33:03,279 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-26T10:33:03,280 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47db50b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7a76f559-d5d5-e8b7-800c-5c4f29b03122/hadoop.log.dir/,AVAILABLE} 2024-11-26T10:33:03,280 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4727fac8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-26T10:33:03,392 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1f79ec76{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7a76f559-d5d5-e8b7-800c-5c4f29b03122/java.io.tmpdir/jetty-localhost-33985-hadoop-hdfs-3_4_1-tests_jar-_-any-5467165556803057465/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-26T10:33:03,393 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@576ebda6{HTTP/1.1, (http/1.1)}{localhost:33985} 2024-11-26T10:33:03,393 INFO [Time-limited test {}] server.Server(415): Started @3252ms 2024-11-26T10:33:03,445 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-26T10:33:04,210 WARN [Thread-73 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7a76f559-d5d5-e8b7-800c-5c4f29b03122/cluster_8e76cc6b-74d5-cb1e-040e-96e811924b73/dfs/data/data2/current/BP-936542670-172.17.0.2-1732617181920/current, will proceed with Du for space computation calculation, 2024-11-26T10:33:04,210 WARN [Thread-72 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7a76f559-d5d5-e8b7-800c-5c4f29b03122/cluster_8e76cc6b-74d5-cb1e-040e-96e811924b73/dfs/data/data1/current/BP-936542670-172.17.0.2-1732617181920/current, will proceed with Du for space computation calculation, 2024-11-26T10:33:04,236 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-26T10:33:04,282 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x15a850eb7b8af7bb with lease ID 0x4aea7ab51069b5b9: Processing first storage report for DS-e9c25a70-91ef-4bf2-a46b-012fbfbee64c from datanode DatanodeRegistration(127.0.0.1:41261, datanodeUuid=6493beed-2905-4521-b551-7832b5ab41ae, infoPort=37797, infoSecurePort=0, ipcPort=43597, storageInfo=lv=-57;cid=testClusterID;nsid=1614848855;c=1732617181920) 2024-11-26T10:33:04,283 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x15a850eb7b8af7bb with lease ID 0x4aea7ab51069b5b9: from storage DS-e9c25a70-91ef-4bf2-a46b-012fbfbee64c node DatanodeRegistration(127.0.0.1:41261, datanodeUuid=6493beed-2905-4521-b551-7832b5ab41ae, infoPort=37797, infoSecurePort=0, ipcPort=43597, storageInfo=lv=-57;cid=testClusterID;nsid=1614848855;c=1732617181920), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-26T10:33:04,284 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x15a850eb7b8af7bb with lease ID 0x4aea7ab51069b5b9: Processing first storage report for DS-095cb225-19ec-41f4-9e71-2ac77adddc27 from datanode DatanodeRegistration(127.0.0.1:41261, datanodeUuid=6493beed-2905-4521-b551-7832b5ab41ae, infoPort=37797, infoSecurePort=0, ipcPort=43597, storageInfo=lv=-57;cid=testClusterID;nsid=1614848855;c=1732617181920) 2024-11-26T10:33:04,284 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x15a850eb7b8af7bb with lease ID 0x4aea7ab51069b5b9: from storage DS-095cb225-19ec-41f4-9e71-2ac77adddc27 node DatanodeRegistration(127.0.0.1:41261, datanodeUuid=6493beed-2905-4521-b551-7832b5ab41ae, infoPort=37797, infoSecurePort=0, ipcPort=43597, storageInfo=lv=-57;cid=testClusterID;nsid=1614848855;c=1732617181920), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-26T10:33:04,347 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7a76f559-d5d5-e8b7-800c-5c4f29b03122 
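At this point HDFS is up (one datanode reporting two storages); the lines that follow start ZooKeeper, the master, and the region server. For orientation, a minimal sketch of the HBaseTestingUtility call that drives a startup like the one logged for TestAcidGuaranteesWithAdaptivePolicy, mirroring the StartMiniClusterOption values printed above (class name and test body here are illustrative, not taken from the test source):

    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterStartupSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtility util = new HBaseTestingUtility();
        // Mirrors StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=1, numZkServers=1}
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(1)
            .numZkServers(1)
            .build();
        util.startMiniCluster(option);   // starts DFS, ZooKeeper, master and region server as traced above
        try {
          // test body would run against util.getConnection() / util.getAdmin()
        } finally {
          util.shutdownMiniCluster();    // tears everything down and removes the test data directory
        }
      }
    }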
2024-11-26T10:33:04,411 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7a76f559-d5d5-e8b7-800c-5c4f29b03122/cluster_8e76cc6b-74d5-cb1e-040e-96e811924b73/zookeeper_0, clientPort=61934, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7a76f559-d5d5-e8b7-800c-5c4f29b03122/cluster_8e76cc6b-74d5-cb1e-040e-96e811924b73/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7a76f559-d5d5-e8b7-800c-5c4f29b03122/cluster_8e76cc6b-74d5-cb1e-040e-96e811924b73/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-26T10:33:04,419 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=61934 2024-11-26T10:33:04,428 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:33:04,430 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:33:04,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741825_1001 (size=7) 2024-11-26T10:33:05,025 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1 with version=8 2024-11-26T10:33:05,025 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/hbase-staging 2024-11-26T10:33:05,129 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-26T10:33:05,360 INFO [Time-limited test {}] client.ConnectionUtils(129): master/ccf62758a0a5:0 server-side Connection retries=45 2024-11-26T10:33:05,375 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-26T10:33:05,375 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-26T10:33:05,375 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-26T10:33:05,376 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-26T10:33:05,376 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-26T10:33:05,490 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating 
org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-26T10:33:05,543 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-26T10:33:05,551 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-26T10:33:05,554 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-26T10:33:05,575 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 58098 (auto-detected) 2024-11-26T10:33:05,576 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-26T10:33:05,593 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:41385 2024-11-26T10:33:05,600 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:33:05,602 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:33:05,612 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:41385 connecting to ZooKeeper ensemble=127.0.0.1:61934 2024-11-26T10:33:05,709 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:413850x0, quorum=127.0.0.1:61934, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-26T10:33:05,711 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41385-0x10177fdc7010000 connected 2024-11-26T10:33:05,802 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41385-0x10177fdc7010000, quorum=127.0.0.1:61934, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-26T10:33:05,805 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41385-0x10177fdc7010000, quorum=127.0.0.1:61934, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-26T10:33:05,808 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41385-0x10177fdc7010000, quorum=127.0.0.1:61934, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-26T10:33:05,812 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41385 2024-11-26T10:33:05,813 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41385 2024-11-26T10:33:05,813 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41385 2024-11-26T10:33:05,814 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41385 2024-11-26T10:33:05,814 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41385 
2024-11-26T10:33:05,820 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1, hbase.cluster.distributed=false 2024-11-26T10:33:05,878 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/ccf62758a0a5:0 server-side Connection retries=45 2024-11-26T10:33:05,878 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-26T10:33:05,879 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-26T10:33:05,879 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-26T10:33:05,879 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-26T10:33:05,879 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-26T10:33:05,881 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-26T10:33:05,884 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-26T10:33:05,885 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:45419 2024-11-26T10:33:05,886 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-26T10:33:05,890 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-26T10:33:05,892 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:33:05,894 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:33:05,898 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:45419 connecting to ZooKeeper ensemble=127.0.0.1:61934 2024-11-26T10:33:05,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:454190x0, quorum=127.0.0.1:61934, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-26T10:33:05,910 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:454190x0, quorum=127.0.0.1:61934, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-26T10:33:05,910 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45419-0x10177fdc7010001 connected 2024-11-26T10:33:05,912 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45419-0x10177fdc7010001, quorum=127.0.0.1:61934, 
baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-26T10:33:05,913 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45419-0x10177fdc7010001, quorum=127.0.0.1:61934, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-26T10:33:05,913 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45419 2024-11-26T10:33:05,914 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45419 2024-11-26T10:33:05,915 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45419 2024-11-26T10:33:05,916 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45419 2024-11-26T10:33:05,917 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45419 2024-11-26T10:33:05,919 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/ccf62758a0a5,41385,1732617185123 2024-11-26T10:33:05,931 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45419-0x10177fdc7010001, quorum=127.0.0.1:61934, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-26T10:33:05,931 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41385-0x10177fdc7010000, quorum=127.0.0.1:61934, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-26T10:33:05,933 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41385-0x10177fdc7010000, quorum=127.0.0.1:61934, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/ccf62758a0a5,41385,1732617185123 2024-11-26T10:33:05,933 DEBUG [M:0;ccf62758a0a5:41385 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;ccf62758a0a5:41385 2024-11-26T10:33:05,959 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41385-0x10177fdc7010000, quorum=127.0.0.1:61934, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-26T10:33:05,959 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45419-0x10177fdc7010001, quorum=127.0.0.1:61934, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-26T10:33:05,959 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41385-0x10177fdc7010000, quorum=127.0.0.1:61934, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:33:05,959 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45419-0x10177fdc7010001, quorum=127.0.0.1:61934, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:33:05,960 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41385-0x10177fdc7010000, quorum=127.0.0.1:61934, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-26T10:33:05,961 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] 
master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/ccf62758a0a5,41385,1732617185123 from backup master directory 2024-11-26T10:33:05,961 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:41385-0x10177fdc7010000, quorum=127.0.0.1:61934, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-26T10:33:05,973 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41385-0x10177fdc7010000, quorum=127.0.0.1:61934, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/ccf62758a0a5,41385,1732617185123 2024-11-26T10:33:05,973 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45419-0x10177fdc7010001, quorum=127.0.0.1:61934, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-26T10:33:05,973 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41385-0x10177fdc7010000, quorum=127.0.0.1:61934, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-26T10:33:05,974 WARN [master/ccf62758a0a5:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-26T10:33:05,974 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=ccf62758a0a5,41385,1732617185123 2024-11-26T10:33:05,977 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-26T10:33:05,979 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-26T10:33:06,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741826_1002 (size=42) 2024-11-26T10:33:06,452 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/hbase.id with ID: cbc0970b-711d-42ed-8eb2-94e428d82fc8 2024-11-26T10:33:06,499 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-26T10:33:06,556 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45419-0x10177fdc7010001, quorum=127.0.0.1:61934, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:33:06,556 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41385-0x10177fdc7010000, quorum=127.0.0.1:61934, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:33:06,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741827_1003 (size=196) 2024-11-26T10:33:06,997 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-26T10:33:06,999 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-26T10:33:07,013 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:232) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:207) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:07,016 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-26T10:33:07,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741828_1004 (size=1189) 2024-11-26T10:33:07,470 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/MasterData/data/master/store 2024-11-26T10:33:07,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741829_1005 (size=34) 2024-11-26T10:33:07,899 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
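The 'master:store' descriptor printed above (families info, proc, rs, state) is built internally by the master during region creation. Purely as an illustration of what those logged attributes mean, an equivalent descriptor for just the 'info' family could be expressed with the public builder API roughly as follows (values copied from the trace; this is a sketch, not the code HBase itself runs):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
      // Sketch only: reproduces the logged 'info' family attributes of master:store
      static TableDescriptor infoFamilyOnly() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                    // VERSIONS => '3'
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
                .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
                .setInMemory(true)                                    // IN_MEMORY => 'true'
                .setBlocksize(8192)                                   // BLOCKSIZE => '8192 B (8KB)'
                .build())
            .build();
      }
    }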
2024-11-26T10:33:07,899 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:33:07,900 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-26T10:33:07,900 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-26T10:33:07,900 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-26T10:33:07,901 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-26T10:33:07,901 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-26T10:33:07,901 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-26T10:33:07,901 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-26T10:33:07,903 WARN [master/ccf62758a0a5:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/MasterData/data/master/store/.initializing 2024-11-26T10:33:07,903 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/MasterData/WALs/ccf62758a0a5,41385,1732617185123 2024-11-26T10:33:07,909 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-26T10:33:07,919 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ccf62758a0a5%2C41385%2C1732617185123, suffix=, logDir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/MasterData/WALs/ccf62758a0a5,41385,1732617185123, archiveDir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/MasterData/oldWALs, maxLogs=10 2024-11-26T10:33:07,939 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/MasterData/WALs/ccf62758a0a5,41385,1732617185123/ccf62758a0a5%2C41385%2C1732617185123.1732617187924, exclude list is [], retry=0 2024-11-26T10:33:07,954 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41261,DS-e9c25a70-91ef-4bf2-a46b-012fbfbee64c,DISK] 2024-11-26T10:33:07,956 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 
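The WAL lines in this segment show the AsyncFSWALProvider being selected and a new WAL created under MasterData/WALs with blocksize=256 MB and rollsize=128 MB. As a hedged aside, the provider choice is governed by the hbase.wal.provider setting; pinning it explicitly in a test configuration would look like the following ("conf" here is assumed to be an org.apache.hadoop.conf.Configuration, e.g. from HBaseConfiguration.create()):

    // assumption: conf is a Hadoop Configuration obtained via HBaseConfiguration.create()
    conf.set("hbase.wal.provider", "asyncfs");   // matches the AsyncFSWALProvider selected in the trace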
2024-11-26T10:33:07,987 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/MasterData/WALs/ccf62758a0a5,41385,1732617185123/ccf62758a0a5%2C41385%2C1732617185123.1732617187924 2024-11-26T10:33:07,988 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37797:37797)] 2024-11-26T10:33:07,988 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-26T10:33:07,989 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:33:07,992 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:33:07,993 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:33:08,028 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:33:08,049 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-26T10:33:08,053 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:08,055 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:33:08,055 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:33:08,059 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-26T10:33:08,059 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:08,061 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:33:08,061 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:33:08,064 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-26T10:33:08,064 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:08,065 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:33:08,065 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:33:08,068 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-26T10:33:08,069 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:08,070 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:33:08,074 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:33:08,075 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:33:08,084 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-26T10:33:08,088 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-26T10:33:08,092 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-26T10:33:08,094 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62258366, jitterRate=-0.07227805256843567}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-26T10:33:08,100 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-26T10:33:08,101 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-26T10:33:08,127 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6d4b158b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:33:08,154 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 
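[Editor's note] The FlushLargeStoresPolicy(65) entry above says hbase.hregion.percolumnfamilyflush.size.lower.bound is unset for master:store, so the fallback is the region memstore flush size divided by the number of column families. With the flushSize=134217728 (128 MB) reported in the MasterRegionFlusherAndCompactor entry and the four families created earlier (info, proc, rs, state), that gives the 32.0 M in the log, matching flushSizeLowerBound=33554432 in the HRegion(1102) entry. A minimal sketch of that arithmetic (the helper below is illustrative, not HBase code):

public class FlushLowerBoundSketch {
  // Fallback when hbase.hregion.percolumnfamilyflush.size.lower.bound is not set:
  // per-family lower bound = region memstore flush size / number of column families.
  static long perFamilyLowerBound(long memstoreFlushSize, int familyCount) {
    return memstoreFlushSize / familyCount;
  }

  public static void main(String[] args) {
    long flushSize = 134217728L; // 128 MB, as logged for master:store
    int families = 4;            // info, proc, rs, state
    System.out.println(perFamilyLowerBound(flushSize, families)); // 33554432 = 32 MB
  }
}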
2024-11-26T10:33:08,165 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-26T10:33:08,166 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-26T10:33:08,168 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-26T10:33:08,169 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 1 msec 2024-11-26T10:33:08,175 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 5 msec 2024-11-26T10:33:08,175 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-26T10:33:08,202 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-26T10:33:08,217 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41385-0x10177fdc7010000, quorum=127.0.0.1:61934, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-26T10:33:08,267 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-11-26T10:33:08,271 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-26T10:33:08,273 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41385-0x10177fdc7010000, quorum=127.0.0.1:61934, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-26T10:33:08,284 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-11-26T10:33:08,286 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-26T10:33:08,290 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41385-0x10177fdc7010000, quorum=127.0.0.1:61934, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-26T10:33:08,301 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-11-26T10:33:08,303 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41385-0x10177fdc7010000, quorum=127.0.0.1:61934, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-26T10:33:08,314 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-11-26T10:33:08,328 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41385-0x10177fdc7010000, quorum=127.0.0.1:61934, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-26T10:33:08,339 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-26T10:33:08,351 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41385-0x10177fdc7010000, quorum=127.0.0.1:61934, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-26T10:33:08,351 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45419-0x10177fdc7010001, quorum=127.0.0.1:61934, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-26T10:33:08,351 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41385-0x10177fdc7010000, quorum=127.0.0.1:61934, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:33:08,351 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45419-0x10177fdc7010001, quorum=127.0.0.1:61934, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:33:08,352 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=ccf62758a0a5,41385,1732617185123, sessionid=0x10177fdc7010000, setting cluster-up flag (Was=false) 2024-11-26T10:33:08,381 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41385-0x10177fdc7010000, quorum=127.0.0.1:61934, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:33:08,381 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45419-0x10177fdc7010001, quorum=127.0.0.1:61934, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:33:08,409 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-26T10:33:08,411 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=ccf62758a0a5,41385,1732617185123 2024-11-26T10:33:08,431 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41385-0x10177fdc7010000, quorum=127.0.0.1:61934, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:33:08,431 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45419-0x10177fdc7010001, quorum=127.0.0.1:61934, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:33:08,456 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-26T10:33:08,458 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=ccf62758a0a5,41385,1732617185123 2024-11-26T10:33:08,533 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure 
table=hbase:meta 2024-11-26T10:33:08,534 DEBUG [RS:0;ccf62758a0a5:45419 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;ccf62758a0a5:45419 2024-11-26T10:33:08,536 INFO [RS:0;ccf62758a0a5:45419 {}] regionserver.HRegionServer(1008): ClusterId : cbc0970b-711d-42ed-8eb2-94e428d82fc8 2024-11-26T10:33:08,538 DEBUG [RS:0;ccf62758a0a5:45419 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-26T10:33:08,539 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-11-26T10:33:08,542 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-26T10:33:08,546 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: ccf62758a0a5,41385,1732617185123 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-26T10:33:08,549 DEBUG [RS:0;ccf62758a0a5:45419 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-26T10:33:08,550 DEBUG [RS:0;ccf62758a0a5:45419 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-26T10:33:08,550 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/ccf62758a0a5:0, corePoolSize=5, maxPoolSize=5 2024-11-26T10:33:08,550 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/ccf62758a0a5:0, corePoolSize=5, maxPoolSize=5 2024-11-26T10:33:08,551 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/ccf62758a0a5:0, corePoolSize=5, maxPoolSize=5 2024-11-26T10:33:08,551 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/ccf62758a0a5:0, corePoolSize=5, maxPoolSize=5 2024-11-26T10:33:08,551 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/ccf62758a0a5:0, corePoolSize=10, maxPoolSize=10 2024-11-26T10:33:08,551 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/ccf62758a0a5:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:33:08,551 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/ccf62758a0a5:0, corePoolSize=2, maxPoolSize=2 2024-11-26T10:33:08,551 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service 
name=MASTER_TABLE_OPERATIONS-master/ccf62758a0a5:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:33:08,553 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732617218553 2024-11-26T10:33:08,555 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-26T10:33:08,556 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-26T10:33:08,557 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-11-26T10:33:08,557 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-11-26T10:33:08,560 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-26T10:33:08,560 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-26T10:33:08,560 DEBUG [RS:0;ccf62758a0a5:45419 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-26T10:33:08,560 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-26T10:33:08,561 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-26T10:33:08,561 DEBUG [RS:0;ccf62758a0a5:45419 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4e66b4a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:33:08,563 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:08,563 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-26T10:33:08,563 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-26T10:33:08,564 DEBUG [RS:0;ccf62758a0a5:45419 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@489343f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ccf62758a0a5/172.17.0.2:0 2024-11-26T10:33:08,565 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-26T10:33:08,566 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-26T10:33:08,567 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-26T10:33:08,568 INFO [RS:0;ccf62758a0a5:45419 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-11-26T10:33:08,568 INFO [RS:0;ccf62758a0a5:45419 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-11-26T10:33:08,569 DEBUG [RS:0;ccf62758a0a5:45419 {}] regionserver.HRegionServer(1090): About to register with Master. 
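[Editor's note] The FSTableDescriptors(133) entry above prints the hbase:meta descriptor with per-family attributes (ROWCOL bloom filters, IN_MEMORY caching, ROW_INDEX_V1 block encoding, an 8 KB block size for info). A minimal sketch of declaring an equivalent family with the HBase 2.x ColumnFamilyDescriptorBuilder API; this only illustrates the attributes, it is not how InitMetaProcedure itself builds the descriptor:

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeFamilySketch {
  public static void main(String[] args) {
    // Mirrors the 'info' family attributes shown in the logged descriptor.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBlocksize(8192)
        .build();
    System.out.println(info);
  }
}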
2024-11-26T10:33:08,569 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-26T10:33:08,569 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-26T10:33:08,571 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/ccf62758a0a5:0:becomeActiveMaster-HFileCleaner.large.0-1732617188571,5,FailOnTimeoutGroup] 2024-11-26T10:33:08,571 INFO [RS:0;ccf62758a0a5:45419 {}] regionserver.HRegionServer(3073): reportForDuty to master=ccf62758a0a5,41385,1732617185123 with isa=ccf62758a0a5/172.17.0.2:45419, startcode=1732617185877 2024-11-26T10:33:08,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741831_1007 (size=1039) 2024-11-26T10:33:08,575 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/ccf62758a0a5:0:becomeActiveMaster-HFileCleaner.small.0-1732617188571,5,FailOnTimeoutGroup] 2024-11-26T10:33:08,576 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-26T10:33:08,576 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-26T10:33:08,577 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-26T10:33:08,577 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
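[Editor's note] The HMaster(1680) entry above names its own switch: hbase.regions.recovery.store.file.ref.count must be set to a value greater than 0 to enable reopening regions with very high store file reference counts. A minimal sketch of flipping that switch; the threshold value 3 is an arbitrary example:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class StoreFileRefCountSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Key taken from the log message; any value > 0 enables the recovery behavior.
    conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);
    System.out.println(conf.getInt("hbase.regions.recovery.store.file.ref.count", 0));
  }
}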
2024-11-26T10:33:08,583 DEBUG [RS:0;ccf62758a0a5:45419 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-26T10:33:08,620 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40625, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-26T10:33:08,627 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41385 {}] master.ServerManager(332): Checking decommissioned status of RegionServer ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:08,629 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41385 {}] master.ServerManager(486): Registering regionserver=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:08,644 DEBUG [RS:0;ccf62758a0a5:45419 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1 2024-11-26T10:33:08,644 DEBUG [RS:0;ccf62758a0a5:45419 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:44321 2024-11-26T10:33:08,644 DEBUG [RS:0;ccf62758a0a5:45419 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-11-26T10:33:08,656 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41385-0x10177fdc7010000, quorum=127.0.0.1:61934, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-26T10:33:08,656 DEBUG [RS:0;ccf62758a0a5:45419 {}] zookeeper.ZKUtil(111): regionserver:45419-0x10177fdc7010001, quorum=127.0.0.1:61934, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:08,657 WARN [RS:0;ccf62758a0a5:45419 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-26T10:33:08,657 INFO [RS:0;ccf62758a0a5:45419 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-26T10:33:08,657 DEBUG [RS:0;ccf62758a0a5:45419 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/WALs/ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:08,659 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [ccf62758a0a5,45419,1732617185877] 2024-11-26T10:33:08,671 DEBUG [RS:0;ccf62758a0a5:45419 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-11-26T10:33:08,682 INFO [RS:0;ccf62758a0a5:45419 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-26T10:33:08,698 INFO [RS:0;ccf62758a0a5:45419 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-26T10:33:08,701 INFO [RS:0;ccf62758a0a5:45419 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-26T10:33:08,702 INFO [RS:0;ccf62758a0a5:45419 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
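[Editor's note] The PressureAwareCompactionThroughputController(131) entry above reports a 50.00-100.00 MB/s throttling band with a 60000 ms tuning period. A minimal sketch of setting that band, assuming the commonly documented keys hbase.hstore.compaction.throughput.lower.bound and hbase.hstore.compaction.throughput.higher.bound in bytes per second (both key names and units are assumptions, not taken from this log):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThroughputSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed key names; values mirror the logged 50.00 MB/s - 100.00 MB/s band.
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    System.out.println(conf.get("hbase.hstore.compaction.throughput.higher.bound"));
  }
}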
2024-11-26T10:33:08,702 INFO [RS:0;ccf62758a0a5:45419 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-11-26T10:33:08,709 INFO [RS:0;ccf62758a0a5:45419 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-26T10:33:08,709 DEBUG [RS:0;ccf62758a0a5:45419 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/ccf62758a0a5:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:33:08,710 DEBUG [RS:0;ccf62758a0a5:45419 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/ccf62758a0a5:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:33:08,710 DEBUG [RS:0;ccf62758a0a5:45419 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/ccf62758a0a5:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:33:08,710 DEBUG [RS:0;ccf62758a0a5:45419 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/ccf62758a0a5:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:33:08,710 DEBUG [RS:0;ccf62758a0a5:45419 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/ccf62758a0a5:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:33:08,710 DEBUG [RS:0;ccf62758a0a5:45419 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/ccf62758a0a5:0, corePoolSize=2, maxPoolSize=2 2024-11-26T10:33:08,711 DEBUG [RS:0;ccf62758a0a5:45419 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/ccf62758a0a5:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:33:08,711 DEBUG [RS:0;ccf62758a0a5:45419 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/ccf62758a0a5:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:33:08,711 DEBUG [RS:0;ccf62758a0a5:45419 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/ccf62758a0a5:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:33:08,711 DEBUG [RS:0;ccf62758a0a5:45419 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/ccf62758a0a5:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:33:08,711 DEBUG [RS:0;ccf62758a0a5:45419 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/ccf62758a0a5:0, corePoolSize=1, maxPoolSize=1 2024-11-26T10:33:08,711 DEBUG [RS:0;ccf62758a0a5:45419 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/ccf62758a0a5:0, corePoolSize=3, maxPoolSize=3 2024-11-26T10:33:08,711 DEBUG [RS:0;ccf62758a0a5:45419 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0, corePoolSize=3, maxPoolSize=3 2024-11-26T10:33:08,712 INFO [RS:0;ccf62758a0a5:45419 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-26T10:33:08,713 INFO [RS:0;ccf62758a0a5:45419 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-26T10:33:08,713 INFO [RS:0;ccf62758a0a5:45419 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-26T10:33:08,713 INFO [RS:0;ccf62758a0a5:45419 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-26T10:33:08,713 INFO [RS:0;ccf62758a0a5:45419 {}] hbase.ChoreService(168): Chore ScheduledChore name=ccf62758a0a5,45419,1732617185877-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-26T10:33:08,740 INFO [RS:0;ccf62758a0a5:45419 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-26T10:33:08,741 INFO [RS:0;ccf62758a0a5:45419 {}] hbase.ChoreService(168): Chore ScheduledChore name=ccf62758a0a5,45419,1732617185877-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-26T10:33:08,763 INFO [RS:0;ccf62758a0a5:45419 {}] regionserver.Replication(204): ccf62758a0a5,45419,1732617185877 started 2024-11-26T10:33:08,764 INFO [RS:0;ccf62758a0a5:45419 {}] regionserver.HRegionServer(1767): Serving as ccf62758a0a5,45419,1732617185877, RpcServer on ccf62758a0a5/172.17.0.2:45419, sessionid=0x10177fdc7010001 2024-11-26T10:33:08,765 DEBUG [RS:0;ccf62758a0a5:45419 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-26T10:33:08,765 DEBUG [RS:0;ccf62758a0a5:45419 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:08,765 DEBUG [RS:0;ccf62758a0a5:45419 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ccf62758a0a5,45419,1732617185877' 2024-11-26T10:33:08,765 DEBUG [RS:0;ccf62758a0a5:45419 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-26T10:33:08,766 DEBUG [RS:0;ccf62758a0a5:45419 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-26T10:33:08,767 DEBUG [RS:0;ccf62758a0a5:45419 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-26T10:33:08,767 DEBUG [RS:0;ccf62758a0a5:45419 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-26T10:33:08,767 DEBUG [RS:0;ccf62758a0a5:45419 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:08,767 DEBUG [RS:0;ccf62758a0a5:45419 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ccf62758a0a5,45419,1732617185877' 2024-11-26T10:33:08,767 DEBUG [RS:0;ccf62758a0a5:45419 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-26T10:33:08,767 DEBUG [RS:0;ccf62758a0a5:45419 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-26T10:33:08,768 DEBUG [RS:0;ccf62758a0a5:45419 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-26T10:33:08,768 INFO [RS:0;ccf62758a0a5:45419 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-26T10:33:08,768 INFO [RS:0;ccf62758a0a5:45419 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
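[Editor's note] Several ZKUtil and ZKProcedureMemberRpcs entries above boil down to the same ZooKeeper pattern: probe a znode such as /hbase/flush-table-proc/abort and treat a missing node as normal ("not necessarily an error"). A minimal standalone sketch of that probe with the plain ZooKeeper client; the quorum string matches the one in the log, but connecting from outside the test harness is purely illustrative:

import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class ZnodeProbeSketch {
  public static void main(String[] args) throws Exception {
    // Quorum as reported in the log; a real client would also handle session events in the watcher.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:61934", 30000, event -> { });
    try {
      Stat stat = zk.exists("/hbase/flush-table-proc/abort", false);
      if (stat == null) {
        System.out.println("znode does not exist (not necessarily an error)");
      } else {
        byte[] data = zk.getData("/hbase/flush-table-proc/abort", false, stat);
        System.out.println("data length=" + (data == null ? 0 : data.length));
      }
    } finally {
      zk.close();
    }
  }
}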
2024-11-26T10:33:08,875 INFO [RS:0;ccf62758a0a5:45419 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-26T10:33:08,878 INFO [RS:0;ccf62758a0a5:45419 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ccf62758a0a5%2C45419%2C1732617185877, suffix=, logDir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/WALs/ccf62758a0a5,45419,1732617185877, archiveDir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/oldWALs, maxLogs=32 2024-11-26T10:33:08,894 DEBUG [RS:0;ccf62758a0a5:45419 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/WALs/ccf62758a0a5,45419,1732617185877/ccf62758a0a5%2C45419%2C1732617185877.1732617188881, exclude list is [], retry=0 2024-11-26T10:33:08,899 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41261,DS-e9c25a70-91ef-4bf2-a46b-012fbfbee64c,DISK] 2024-11-26T10:33:08,903 INFO [RS:0;ccf62758a0a5:45419 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/WALs/ccf62758a0a5,45419,1732617185877/ccf62758a0a5%2C45419%2C1732617185877.1732617188881 2024-11-26T10:33:08,903 DEBUG [RS:0;ccf62758a0a5:45419 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37797:37797)] 2024-11-26T10:33:08,975 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-11-26T10:33:08,975 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1 2024-11-26T10:33:08,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741833_1009 (size=32) 2024-11-26T10:33:09,390 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): 
Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:33:09,394 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-26T10:33:09,398 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-26T10:33:09,398 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:09,400 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:33:09,400 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-26T10:33:09,403 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-26T10:33:09,403 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:09,404 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:33:09,404 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family table of region 1588230740 2024-11-26T10:33:09,407 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-26T10:33:09,407 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:09,408 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:33:09,409 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/hbase/meta/1588230740 2024-11-26T10:33:09,410 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/hbase/meta/1588230740 2024-11-26T10:33:09,413 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-26T10:33:09,416 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-26T10:33:09,419 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-26T10:33:09,420 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60126880, jitterRate=-0.10403966903686523}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-26T10:33:09,424 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-26T10:33:09,424 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-26T10:33:09,424 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-26T10:33:09,424 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-26T10:33:09,424 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-26T10:33:09,424 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-26T10:33:09,425 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-11-26T10:33:09,425 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-11-26T10:33:09,428 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-11-26T10:33:09,428 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-11-26T10:33:09,433 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-26T10:33:09,440 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-26T10:33:09,443 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-26T10:33:09,596 DEBUG [ccf62758a0a5:41385 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-26T10:33:09,604 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:09,611 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as ccf62758a0a5,45419,1732617185877, state=OPENING 2024-11-26T10:33:09,647 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-26T10:33:09,656 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41385-0x10177fdc7010000, quorum=127.0.0.1:61934, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:33:09,656 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45419-0x10177fdc7010001, quorum=127.0.0.1:61934, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:33:09,658 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-26T10:33:09,658 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-26T10:33:09,661 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=ccf62758a0a5,45419,1732617185877}] 2024-11-26T10:33:09,845 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:09,846 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-26T10:33:09,849 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33142, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-26T10:33:09,859 INFO [RS_OPEN_META-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-11-26T10:33:09,859 INFO [RS_OPEN_META-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-26T10:33:09,860 INFO [RS_OPEN_META-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-26T10:33:09,863 INFO [RS_OPEN_META-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ccf62758a0a5%2C45419%2C1732617185877.meta, suffix=.meta, logDir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/WALs/ccf62758a0a5,45419,1732617185877, archiveDir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/oldWALs, maxLogs=32 2024-11-26T10:33:09,878 DEBUG [RS_OPEN_META-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/WALs/ccf62758a0a5,45419,1732617185877/ccf62758a0a5%2C45419%2C1732617185877.meta.1732617189865.meta, exclude list is [], retry=0 2024-11-26T10:33:09,882 DEBUG [RS-EventLoopGroup-3-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41261,DS-e9c25a70-91ef-4bf2-a46b-012fbfbee64c,DISK] 2024-11-26T10:33:09,885 INFO [RS_OPEN_META-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/WALs/ccf62758a0a5,45419,1732617185877/ccf62758a0a5%2C45419%2C1732617185877.meta.1732617189865.meta 2024-11-26T10:33:09,885 DEBUG [RS_OPEN_META-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer 
with pipeline: [(127.0.0.1/127.0.0.1:37797:37797)] 2024-11-26T10:33:09,885 DEBUG [RS_OPEN_META-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-26T10:33:09,887 DEBUG [RS_OPEN_META-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-26T10:33:09,937 DEBUG [RS_OPEN_META-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-26T10:33:09,941 INFO [RS_OPEN_META-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-26T10:33:09,944 DEBUG [RS_OPEN_META-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-26T10:33:09,944 DEBUG [RS_OPEN_META-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:33:09,944 DEBUG [RS_OPEN_META-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-11-26T10:33:09,945 DEBUG [RS_OPEN_META-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-11-26T10:33:09,948 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-26T10:33:09,949 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-26T10:33:09,950 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:09,950 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:33:09,951 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-26T10:33:09,952 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-26T10:33:09,952 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:09,953 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:33:09,953 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-26T10:33:09,954 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-26T10:33:09,954 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:09,955 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-26T10:33:09,957 DEBUG [RS_OPEN_META-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/hbase/meta/1588230740 2024-11-26T10:33:09,959 DEBUG [RS_OPEN_META-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/hbase/meta/1588230740 2024-11-26T10:33:09,962 DEBUG [RS_OPEN_META-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-26T10:33:09,965 DEBUG [RS_OPEN_META-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-26T10:33:09,967 INFO [RS_OPEN_META-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74832965, jitterRate=0.11509807407855988}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-26T10:33:09,968 DEBUG [RS_OPEN_META-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-26T10:33:09,975 INFO [RS_OPEN_META-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732617189840 2024-11-26T10:33:09,984 DEBUG [RS_OPEN_META-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-26T10:33:09,985 INFO [RS_OPEN_META-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-11-26T10:33:09,986 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:09,988 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as ccf62758a0a5,45419,1732617185877, state=OPEN 2024-11-26T10:33:10,021 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45419-0x10177fdc7010001, quorum=127.0.0.1:61934, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-26T10:33:10,021 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41385-0x10177fdc7010000, quorum=127.0.0.1:61934, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-26T10:33:10,021 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-26T10:33:10,021 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-26T10:33:10,027 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-11-26T10:33:10,028 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=ccf62758a0a5,45419,1732617185877 in 361 msec 2024-11-26T10:33:10,036 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-11-26T10:33:10,037 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure 
table=hbase:meta, region=1588230740, ASSIGN in 596 msec 2024-11-26T10:33:10,042 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.5470 sec 2024-11-26T10:33:10,043 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732617190042, completionTime=-1 2024-11-26T10:33:10,043 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-26T10:33:10,043 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-11-26T10:33:10,075 DEBUG [hconnection-0x19eefe61-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:33:10,077 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33150, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:33:10,087 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-11-26T10:33:10,087 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732617250087 2024-11-26T10:33:10,087 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732617310087 2024-11-26T10:33:10,087 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 44 msec 2024-11-26T10:33:10,133 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ccf62758a0a5,41385,1732617185123-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-26T10:33:10,133 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ccf62758a0a5,41385,1732617185123-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-26T10:33:10,134 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ccf62758a0a5,41385,1732617185123-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-26T10:33:10,135 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-ccf62758a0a5:41385, period=300000, unit=MILLISECONDS is enabled. 2024-11-26T10:33:10,136 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-26T10:33:10,142 DEBUG [master/ccf62758a0a5:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-11-26T10:33:10,145 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
2024-11-26T10:33:10,146 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-26T10:33:10,151 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-11-26T10:33:10,153 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-11-26T10:33:10,154 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:10,156 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-26T10:33:10,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741835_1011 (size=358) 2024-11-26T10:33:10,575 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => b7271e3c105b406e8a3f3f956110c7a1, NAME => 'hbase:namespace,,1732617190145.b7271e3c105b406e8a3f3f956110c7a1.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1 2024-11-26T10:33:10,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741836_1012 (size=42) 2024-11-26T10:33:10,990 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1732617190145.b7271e3c105b406e8a3f3f956110c7a1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:33:10,990 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing b7271e3c105b406e8a3f3f956110c7a1, disabling compactions & flushes 2024-11-26T10:33:10,991 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1732617190145.b7271e3c105b406e8a3f3f956110c7a1. 2024-11-26T10:33:10,991 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1732617190145.b7271e3c105b406e8a3f3f956110c7a1. 2024-11-26T10:33:10,991 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1732617190145.b7271e3c105b406e8a3f3f956110c7a1. 
after waiting 0 ms 2024-11-26T10:33:10,991 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1732617190145.b7271e3c105b406e8a3f3f956110c7a1. 2024-11-26T10:33:10,991 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1732617190145.b7271e3c105b406e8a3f3f956110c7a1. 2024-11-26T10:33:10,991 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for b7271e3c105b406e8a3f3f956110c7a1: 2024-11-26T10:33:10,995 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-11-26T10:33:11,003 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1732617190145.b7271e3c105b406e8a3f3f956110c7a1.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1732617190997"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732617190997"}]},"ts":"1732617190997"} 2024-11-26T10:33:11,024 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-26T10:33:11,026 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-26T10:33:11,029 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732617191026"}]},"ts":"1732617191026"} 2024-11-26T10:33:11,033 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-11-26T10:33:11,083 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=b7271e3c105b406e8a3f3f956110c7a1, ASSIGN}] 2024-11-26T10:33:11,088 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=b7271e3c105b406e8a3f3f956110c7a1, ASSIGN 2024-11-26T10:33:11,090 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=b7271e3c105b406e8a3f3f956110c7a1, ASSIGN; state=OFFLINE, location=ccf62758a0a5,45419,1732617185877; forceNewPlan=false, retain=false 2024-11-26T10:33:11,241 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=b7271e3c105b406e8a3f3f956110c7a1, regionState=OPENING, regionLocation=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:11,247 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure b7271e3c105b406e8a3f3f956110c7a1, server=ccf62758a0a5,45419,1732617185877}] 2024-11-26T10:33:11,403 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:11,417 INFO [RS_OPEN_PRIORITY_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open 
hbase:namespace,,1732617190145.b7271e3c105b406e8a3f3f956110c7a1. 2024-11-26T10:33:11,418 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => b7271e3c105b406e8a3f3f956110c7a1, NAME => 'hbase:namespace,,1732617190145.b7271e3c105b406e8a3f3f956110c7a1.', STARTKEY => '', ENDKEY => ''} 2024-11-26T10:33:11,418 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace b7271e3c105b406e8a3f3f956110c7a1 2024-11-26T10:33:11,419 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1732617190145.b7271e3c105b406e8a3f3f956110c7a1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:33:11,419 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for b7271e3c105b406e8a3f3f956110c7a1 2024-11-26T10:33:11,419 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for b7271e3c105b406e8a3f3f956110c7a1 2024-11-26T10:33:11,423 INFO [StoreOpener-b7271e3c105b406e8a3f3f956110c7a1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region b7271e3c105b406e8a3f3f956110c7a1 2024-11-26T10:33:11,425 INFO [StoreOpener-b7271e3c105b406e8a3f3f956110c7a1-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b7271e3c105b406e8a3f3f956110c7a1 columnFamilyName info 2024-11-26T10:33:11,425 DEBUG [StoreOpener-b7271e3c105b406e8a3f3f956110c7a1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:11,426 INFO [StoreOpener-b7271e3c105b406e8a3f3f956110c7a1-1 {}] regionserver.HStore(327): Store=b7271e3c105b406e8a3f3f956110c7a1/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:33:11,428 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/hbase/namespace/b7271e3c105b406e8a3f3f956110c7a1 2024-11-26T10:33:11,429 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/hbase/namespace/b7271e3c105b406e8a3f3f956110c7a1 2024-11-26T10:33:11,433 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for b7271e3c105b406e8a3f3f956110c7a1 2024-11-26T10:33:11,438 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/hbase/namespace/b7271e3c105b406e8a3f3f956110c7a1/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-26T10:33:11,439 INFO [RS_OPEN_PRIORITY_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened b7271e3c105b406e8a3f3f956110c7a1; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71841697, jitterRate=0.07052470743656158}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-26T10:33:11,441 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for b7271e3c105b406e8a3f3f956110c7a1: 2024-11-26T10:33:11,444 INFO [RS_OPEN_PRIORITY_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1732617190145.b7271e3c105b406e8a3f3f956110c7a1., pid=6, masterSystemTime=1732617191403 2024-11-26T10:33:11,448 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1732617190145.b7271e3c105b406e8a3f3f956110c7a1. 2024-11-26T10:33:11,448 INFO [RS_OPEN_PRIORITY_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1732617190145.b7271e3c105b406e8a3f3f956110c7a1. 
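With the hbase:namespace region open, the master goes on to bootstrap the built-in 'default' and 'hbase' namespaces (CreateNamespaceProcedure pid=7 and pid=8 in the entries below). Application code creates additional namespaces through the same Admin API; the following is an illustrative sketch under that assumption, not code from this test run:

    import java.io.IOException;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;

    public final class NamespaceSketch {
        // Hypothetical helper: creates a user namespace the same way the master
        // creates 'default' and 'hbase' internally via CreateNamespaceProcedure.
        static void createNamespace(Admin admin, String name) throws IOException {
            NamespaceDescriptor ns = NamespaceDescriptor.create(name).build();
            admin.createNamespace(ns); // blocks until the master procedure finishes
        }
    }
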
2024-11-26T10:33:11,449 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=b7271e3c105b406e8a3f3f956110c7a1, regionState=OPEN, openSeqNum=2, regionLocation=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:11,457 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-11-26T10:33:11,458 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure b7271e3c105b406e8a3f3f956110c7a1, server=ccf62758a0a5,45419,1732617185877 in 206 msec 2024-11-26T10:33:11,460 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-11-26T10:33:11,460 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=b7271e3c105b406e8a3f3f956110c7a1, ASSIGN in 374 msec 2024-11-26T10:33:11,462 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-26T10:33:11,462 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732617191462"}]},"ts":"1732617191462"} 2024-11-26T10:33:11,465 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-11-26T10:33:11,477 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-11-26T10:33:11,481 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 1.3310 sec 2024-11-26T10:33:11,556 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:41385-0x10177fdc7010000, quorum=127.0.0.1:61934, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-11-26T10:33:11,564 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41385-0x10177fdc7010000, quorum=127.0.0.1:61934, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-11-26T10:33:11,564 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45419-0x10177fdc7010001, quorum=127.0.0.1:61934, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:33:11,564 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41385-0x10177fdc7010000, quorum=127.0.0.1:61934, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:33:11,594 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-11-26T10:33:11,614 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41385-0x10177fdc7010000, quorum=127.0.0.1:61934, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-26T10:33:11,629 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 37 msec 2024-11-26T10:33:11,639 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-11-26T10:33:11,659 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41385-0x10177fdc7010000, quorum=127.0.0.1:61934, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-26T10:33:11,672 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 32 msec 2024-11-26T10:33:11,701 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41385-0x10177fdc7010000, quorum=127.0.0.1:61934, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-11-26T10:33:11,717 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41385-0x10177fdc7010000, quorum=127.0.0.1:61934, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-11-26T10:33:11,718 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 5.743sec 2024-11-26T10:33:11,720 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-26T10:33:11,723 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-26T10:33:11,724 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-26T10:33:11,726 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-26T10:33:11,726 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-26T10:33:11,727 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ccf62758a0a5,41385,1732617185123-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-26T10:33:11,727 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ccf62758a0a5,41385,1732617185123-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-26T10:33:11,733 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-11-26T10:33:11,734 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-26T10:33:11,734 INFO [master/ccf62758a0a5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ccf62758a0a5,41385,1732617185123-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
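At this point the master has completed initialization ("Master has completed initialization 5.743sec" above), and the remaining entries show the test harness opening client connections through ZooKeeper, including a deprecation warning for ZKConnectionRegistry. For reference, a minimal standalone HBase client connection looks roughly like the sketch below; the quorum and base znode values are taken from this log, while the class and everything else in it are illustrative assumptions rather than the test's own code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClientConnectionSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            conf.set("hbase.zookeeper.quorum", "127.0.0.1:61934"); // quorum=127.0.0.1:61934 in this log
            conf.set("zookeeper.znode.parent", "/hbase");          // baseZNode=/hbase in this log
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // table and namespace operations (createTable, flush, ...) go here
            }
        }
    }
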
2024-11-26T10:33:11,739 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7e541e88 to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5e83c466 2024-11-26T10:33:11,739 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-11-26T10:33:11,753 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@305a704f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:33:11,757 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-26T10:33:11,757 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-26T10:33:11,766 DEBUG [hconnection-0x68773b0e-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:33:11,774 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33158, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:33:11,781 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=ccf62758a0a5,41385,1732617185123 2024-11-26T10:33:11,793 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=219, OpenFileDescriptor=444, MaxFileDescriptor=1048576, SystemLoadAverage=278, ProcessCount=11, AvailableMemoryMB=5963 2024-11-26T10:33:11,816 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-26T10:33:11,821 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35592, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-26T10:33:11,828 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
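The TableDescriptorChecker warning above flags that the memstore flush size in effect here is only 131072 bytes (128 KB), far below the 128 MB default; the acid-guarantees test presumably lowers it on purpose to force very frequent flushes. In a normal deployment the value is left at its default or raised, either cluster-wide via hbase.hregion.memstore.flush.size or per table. A hypothetical per-table sketch (table name and value are made up for illustration):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public final class FlushSizeSketch {
        // Per-table override of the memstore flush size; 128 MB is just an example value.
        static TableDescriptor withLargerFlushSize() {
            return TableDescriptorBuilder.newBuilder(TableName.valueOf("SomeTable"))
                .setMemStoreFlushSize(128L * 1024 * 1024)
                .build();
        }
    }
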
2024-11-26T10:33:11,832 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-26T10:33:11,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-26T10:33:11,836 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-26T10:33:11,837 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 9 2024-11-26T10:33:11,837 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:11,839 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-26T10:33:11,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-26T10:33:11,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741837_1013 (size=963) 2024-11-26T10:33:11,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-26T10:33:12,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-26T10:33:12,259 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1 2024-11-26T10:33:12,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741838_1014 (size=53) 2024-11-26T10:33:12,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-26T10:33:12,676 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:33:12,676 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 6a931c8e80842c8947954ecd8357e9ad, disabling compactions & flushes 2024-11-26T10:33:12,676 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:12,677 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:12,677 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. after waiting 0 ms 2024-11-26T10:33:12,677 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:12,677 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 
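The descriptor logged above for 'TestAcidGuarantees' (families A, B and C with a single version, 64 KB blocks, and the table attribute hbase.hregion.compacting.memstore.type => 'ADAPTIVE') corresponds, roughly, to the following use of the public client API. This is a hypothetical equivalent for reference, not the builder code the test actually runs:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class CreateTableSketch {
        static void createTestTable(Admin admin) throws IOException {
            TableDescriptorBuilder table =
                TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                    // table attribute seen in the log: ADAPTIVE in-memory compaction
                    .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
            for (String family : new String[] {"A", "B", "C"}) {
                table.setColumnFamily(
                    ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                        .setMaxVersions(1)        // VERSIONS => '1'
                        .setBlocksize(64 * 1024)  // BLOCKSIZE => '65536 B (64KB)'
                        .build());
            }
            // Stored on the master as a CreateTableProcedure (pid=9 in this log).
            admin.createTable(table.build());
        }
    }

admin.createTable blocks until the master procedure completes; the repeated "Checking to see if procedure is done pid=9" entries below are exactly that client-side polling. Callers that want to overlap other work can use admin.createTableAsync, which returns a Future instead.
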
2024-11-26T10:33:12,678 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:12,682 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-26T10:33:12,683 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732617192683"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732617192683"}]},"ts":"1732617192683"} 2024-11-26T10:33:12,689 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-26T10:33:12,691 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-26T10:33:12,691 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732617192691"}]},"ts":"1732617192691"} 2024-11-26T10:33:12,694 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-26T10:33:12,739 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=6a931c8e80842c8947954ecd8357e9ad, ASSIGN}] 2024-11-26T10:33:12,743 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=6a931c8e80842c8947954ecd8357e9ad, ASSIGN 2024-11-26T10:33:12,747 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=6a931c8e80842c8947954ecd8357e9ad, ASSIGN; state=OFFLINE, location=ccf62758a0a5,45419,1732617185877; forceNewPlan=false, retain=false 2024-11-26T10:33:12,898 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=6a931c8e80842c8947954ecd8357e9ad, regionState=OPENING, regionLocation=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:12,904 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877}] 2024-11-26T10:33:12,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-26T10:33:13,061 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:13,068 INFO [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 
2024-11-26T10:33:13,069 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} 2024-11-26T10:33:13,069 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 6a931c8e80842c8947954ecd8357e9ad 2024-11-26T10:33:13,069 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:33:13,069 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 6a931c8e80842c8947954ecd8357e9ad 2024-11-26T10:33:13,070 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 6a931c8e80842c8947954ecd8357e9ad 2024-11-26T10:33:13,072 INFO [StoreOpener-6a931c8e80842c8947954ecd8357e9ad-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 6a931c8e80842c8947954ecd8357e9ad 2024-11-26T10:33:13,077 INFO [StoreOpener-6a931c8e80842c8947954ecd8357e9ad-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-26T10:33:13,077 INFO [StoreOpener-6a931c8e80842c8947954ecd8357e9ad-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6a931c8e80842c8947954ecd8357e9ad columnFamilyName A 2024-11-26T10:33:13,077 DEBUG [StoreOpener-6a931c8e80842c8947954ecd8357e9ad-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:13,079 INFO [StoreOpener-6a931c8e80842c8947954ecd8357e9ad-1 {}] regionserver.HStore(327): Store=6a931c8e80842c8947954ecd8357e9ad/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:33:13,079 INFO [StoreOpener-6a931c8e80842c8947954ecd8357e9ad-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 6a931c8e80842c8947954ecd8357e9ad 2024-11-26T10:33:13,081 INFO [StoreOpener-6a931c8e80842c8947954ecd8357e9ad-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-26T10:33:13,082 INFO [StoreOpener-6a931c8e80842c8947954ecd8357e9ad-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6a931c8e80842c8947954ecd8357e9ad columnFamilyName B 2024-11-26T10:33:13,082 DEBUG [StoreOpener-6a931c8e80842c8947954ecd8357e9ad-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:13,083 INFO [StoreOpener-6a931c8e80842c8947954ecd8357e9ad-1 {}] regionserver.HStore(327): Store=6a931c8e80842c8947954ecd8357e9ad/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:33:13,083 INFO [StoreOpener-6a931c8e80842c8947954ecd8357e9ad-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 6a931c8e80842c8947954ecd8357e9ad 2024-11-26T10:33:13,086 INFO [StoreOpener-6a931c8e80842c8947954ecd8357e9ad-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-26T10:33:13,086 INFO [StoreOpener-6a931c8e80842c8947954ecd8357e9ad-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6a931c8e80842c8947954ecd8357e9ad columnFamilyName C 2024-11-26T10:33:13,087 DEBUG [StoreOpener-6a931c8e80842c8947954ecd8357e9ad-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:13,087 INFO [StoreOpener-6a931c8e80842c8947954ecd8357e9ad-1 {}] regionserver.HStore(327): Store=6a931c8e80842c8947954ecd8357e9ad/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:33:13,088 INFO [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:13,089 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad 2024-11-26T10:33:13,090 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad 2024-11-26T10:33:13,093 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-26T10:33:13,095 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 6a931c8e80842c8947954ecd8357e9ad 2024-11-26T10:33:13,098 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-26T10:33:13,099 INFO [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 6a931c8e80842c8947954ecd8357e9ad; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66273052, jitterRate=-0.012454569339752197}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-26T10:33:13,100 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:13,102 INFO [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad., pid=11, masterSystemTime=1732617193060 2024-11-26T10:33:13,105 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:13,105 INFO [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 
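The TestAcidGuarantees region is now fully open with a CompactingMemStore (ADAPTIVE compactor, 2 MB in-memory flush threshold) on every family. The entries that follow show the test clients connecting, writing, and asking the master to flush the table ("flush TestAcidGuarantees", pid=12/13). From application code such a flush is a one-liner on the Admin API; an illustrative sketch, with the helper method being an assumption of this annotation rather than part of the test:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    public final class FlushSketch {
        // Hypothetical helper: asks the master to flush every region of the table.
        // The master records this as a FlushTableProcedure and dispatches
        // FlushRegionProcedure subprocedures to the region servers (pid=12/13 below).
        static void flushTable(Admin admin) throws IOException {
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
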
2024-11-26T10:33:13,106 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=6a931c8e80842c8947954ecd8357e9ad, regionState=OPEN, openSeqNum=2, regionLocation=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:13,107 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41385 {}] assignment.AssignmentManager(1526): Unable to acquire lock for regionNode state=OPEN, location=ccf62758a0a5,45419,1732617185877, table=TestAcidGuarantees, region=6a931c8e80842c8947954ecd8357e9ad. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-11-26T10:33:13,112 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-11-26T10:33:13,113 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 in 205 msec 2024-11-26T10:33:13,115 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-11-26T10:33:13,115 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=6a931c8e80842c8947954ecd8357e9ad, ASSIGN in 373 msec 2024-11-26T10:33:13,116 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-26T10:33:13,116 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732617193116"}]},"ts":"1732617193116"} 2024-11-26T10:33:13,118 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-26T10:33:13,127 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-26T10:33:13,130 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2950 sec 2024-11-26T10:33:13,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-26T10:33:13,967 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 9 completed 2024-11-26T10:33:13,974 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1f6e36fe to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@e98ea32 2024-11-26T10:33:14,019 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b9fcedf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:33:14,025 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:33:14,029 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): 
Connection from 172.17.0.2:33160, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:33:14,032 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-26T10:33:14,034 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35596, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-26T10:33:14,041 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6f343a4d to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@12885408 2024-11-26T10:33:14,051 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9bd0964, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:33:14,053 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x22cb07dd to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@72b32f98 2024-11-26T10:33:14,065 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18cb251d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:33:14,067 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x478bae6b to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4977266 2024-11-26T10:33:14,077 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45b55c24, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:33:14,079 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5400112e to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6bbb5d8a 2024-11-26T10:33:14,090 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e52b42a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:33:14,092 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x38766d64 to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@18603bb9 2024-11-26T10:33:14,101 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3883f7b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:33:14,104 DEBUG [Time-limited test {}] 
zookeeper.ReadOnlyZKClient(149): Connect 0x295cb1ac to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@72e97e4b 2024-11-26T10:33:14,115 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@12a1285d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:33:14,117 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x70267494 to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@490457fd 2024-11-26T10:33:14,126 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@527c6d40, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:33:14,128 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1d2a8e08 to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2c8de680 2024-11-26T10:33:14,140 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47fe2fa7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:33:14,141 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2c915d17 to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6f6b07e3 2024-11-26T10:33:14,151 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@595e9ebe, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:33:14,155 DEBUG [hconnection-0x76214a8b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:33:14,156 DEBUG [hconnection-0x460db217-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:33:14,159 DEBUG [hconnection-0x3a4b465a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:33:14,160 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33168, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:33:14,163 DEBUG [hconnection-0x2cbdc5ac-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:33:14,163 DEBUG [hconnection-0x5fa3171e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:33:14,163 DEBUG [hconnection-0x13693b96-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE 
authentication for service=ClientService, sasl=false 2024-11-26T10:33:14,164 DEBUG [hconnection-0x78b714ed-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:33:14,164 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-26T10:33:14,165 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33170, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:33:14,166 DEBUG [hconnection-0x12418eb5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:33:14,166 DEBUG [hconnection-0x2534ab3b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:33:14,168 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33180, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:33:14,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees 2024-11-26T10:33:14,172 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-26T10:33:14,173 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33206, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:33:14,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-26T10:33:14,173 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33192, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:33:14,175 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-26T10:33:14,177 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-26T10:33:14,184 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33228, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:33:14,184 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33222, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:33:14,187 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33234, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:33:14,191 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33248, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), 
service=ClientService 2024-11-26T10:33:14,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 6a931c8e80842c8947954ecd8357e9ad 2024-11-26T10:33:14,244 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6a931c8e80842c8947954ecd8357e9ad 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-26T10:33:14,257 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=A 2024-11-26T10:33:14,258 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:14,258 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=B 2024-11-26T10:33:14,258 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:14,258 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=C 2024-11-26T10:33:14,259 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:14,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-26T10:33:14,345 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:14,347 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-26T10:33:14,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:14,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. as already flushing 2024-11-26T10:33:14,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:14,357 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:14,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:14,364 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/57d5077aa3ab4572bd1036d9e6ae3016 is 50, key is test_row_0/A:col10/1732617194240/Put/seqid=0 2024-11-26T10:33:14,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:14,385 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:14,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617254377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:14,404 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:14,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33248 deadline: 1732617254376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:14,406 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:14,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617254378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:14,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741839_1015 (size=9657) 2024-11-26T10:33:14,410 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:14,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617254378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:14,411 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:14,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617254380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:14,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-26T10:33:14,544 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:14,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617254543, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:14,546 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:14,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617254543, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:14,548 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:14,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33248 deadline: 1732617254544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:14,550 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:14,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617254544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:14,551 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:14,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617254544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:14,554 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:14,555 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-26T10:33:14,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 
2024-11-26T10:33:14,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. as already flushing 2024-11-26T10:33:14,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:14,564 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:14,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:33:14,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:33:14,677 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-26T10:33:14,678 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-26T10:33:14,679 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-11-26T10:33:14,717 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:14,718 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-26T10:33:14,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:14,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. as already flushing 2024-11-26T10:33:14,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:14,722 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:14,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:14,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:14,750 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:14,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617254749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:14,753 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:14,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617254752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:14,755 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:14,755 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:14,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33248 deadline: 1732617254755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:14,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617254755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:14,758 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:14,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617254757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:14,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-26T10:33:14,811 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/57d5077aa3ab4572bd1036d9e6ae3016 2024-11-26T10:33:14,877 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:14,877 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-26T10:33:14,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:14,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. as already flushing 2024-11-26T10:33:14,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:14,878 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:14,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:14,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:14,924 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/b10d86ffa79e4c53a6b807ac43c6b78c is 50, key is test_row_0/B:col10/1732617194240/Put/seqid=0 2024-11-26T10:33:14,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741840_1016 (size=9657) 2024-11-26T10:33:14,964 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/b10d86ffa79e4c53a6b807ac43c6b78c 2024-11-26T10:33:15,009 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/398abb2cc49d48718b6d75448f7cda1d is 50, key is test_row_0/C:col10/1732617194240/Put/seqid=0 2024-11-26T10:33:15,032 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:15,033 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-26T10:33:15,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:15,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. as already flushing 2024-11-26T10:33:15,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:15,034 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:15,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:15,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:15,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741841_1017 (size=9657) 2024-11-26T10:33:15,045 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/398abb2cc49d48718b6d75448f7cda1d 2024-11-26T10:33:15,055 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:15,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617255055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:15,060 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:15,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617255059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:15,063 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/57d5077aa3ab4572bd1036d9e6ae3016 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/57d5077aa3ab4572bd1036d9e6ae3016 2024-11-26T10:33:15,065 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:15,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617255062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:15,068 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:15,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33248 deadline: 1732617255064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:15,070 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:15,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617255062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:15,077 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/57d5077aa3ab4572bd1036d9e6ae3016, entries=100, sequenceid=13, filesize=9.4 K 2024-11-26T10:33:15,081 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/b10d86ffa79e4c53a6b807ac43c6b78c as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/b10d86ffa79e4c53a6b807ac43c6b78c 2024-11-26T10:33:15,101 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/b10d86ffa79e4c53a6b807ac43c6b78c, entries=100, sequenceid=13, filesize=9.4 K 2024-11-26T10:33:15,104 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/398abb2cc49d48718b6d75448f7cda1d as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/398abb2cc49d48718b6d75448f7cda1d 2024-11-26T10:33:15,125 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/398abb2cc49d48718b6d75448f7cda1d, entries=100, sequenceid=13, filesize=9.4 K 2024-11-26T10:33:15,127 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 6a931c8e80842c8947954ecd8357e9ad in 884ms, sequenceid=13, compaction requested=false 2024-11-26T10:33:15,127 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:15,193 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:15,194 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] 
regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-26T10:33:15,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:15,194 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing 6a931c8e80842c8947954ecd8357e9ad 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-26T10:33:15,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=A 2024-11-26T10:33:15,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:15,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=B 2024-11-26T10:33:15,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:15,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=C 2024-11-26T10:33:15,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:15,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/8be99741b59e4581a589a6f6632be054 is 50, key is test_row_0/A:col10/1732617194371/Put/seqid=0 2024-11-26T10:33:15,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741842_1018 (size=12001) 2024-11-26T10:33:15,238 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/8be99741b59e4581a589a6f6632be054 2024-11-26T10:33:15,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/0c9ce492bfd34091806038446f5d10a0 is 50, key is test_row_0/B:col10/1732617194371/Put/seqid=0 2024-11-26T10:33:15,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-26T10:33:15,291 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741843_1019 (size=12001) 2024-11-26T10:33:15,458 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-26T10:33:15,547 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-26T10:33:15,549 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-26T10:33:15,551 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-11-26T10:33:15,551 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-11-26T10:33:15,553 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-26T10:33:15,553 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-26T10:33:15,554 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-26T10:33:15,554 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-26T10:33:15,555 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-26T10:33:15,555 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-11-26T10:33:15,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 6a931c8e80842c8947954ecd8357e9ad 2024-11-26T10:33:15,568 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. as already flushing 2024-11-26T10:33:15,589 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:15,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617255583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:15,591 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:15,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33248 deadline: 1732617255586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:15,592 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:15,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617255586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:15,593 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:15,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617255587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:15,594 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:15,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617255590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:15,693 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/0c9ce492bfd34091806038446f5d10a0 2024-11-26T10:33:15,696 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:15,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617255694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:15,699 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:15,699 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:15,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617255696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:15,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33248 deadline: 1732617255696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:15,700 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:15,700 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:15,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617255698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:15,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617255698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:15,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/e808e7ecb9e140b495c66e945bf9f70f is 50, key is test_row_0/C:col10/1732617194371/Put/seqid=0 2024-11-26T10:33:15,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741844_1020 (size=12001) 2024-11-26T10:33:15,753 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 
{event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/e808e7ecb9e140b495c66e945bf9f70f 2024-11-26T10:33:15,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/8be99741b59e4581a589a6f6632be054 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/8be99741b59e4581a589a6f6632be054 2024-11-26T10:33:15,788 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/8be99741b59e4581a589a6f6632be054, entries=150, sequenceid=38, filesize=11.7 K 2024-11-26T10:33:15,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/0c9ce492bfd34091806038446f5d10a0 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/0c9ce492bfd34091806038446f5d10a0 2024-11-26T10:33:15,809 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/0c9ce492bfd34091806038446f5d10a0, entries=150, sequenceid=38, filesize=11.7 K 2024-11-26T10:33:15,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/e808e7ecb9e140b495c66e945bf9f70f as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/e808e7ecb9e140b495c66e945bf9f70f 2024-11-26T10:33:15,835 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/e808e7ecb9e140b495c66e945bf9f70f, entries=150, sequenceid=38, filesize=11.7 K 2024-11-26T10:33:15,838 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 6a931c8e80842c8947954ecd8357e9ad in 643ms, sequenceid=38, compaction requested=false 2024-11-26T10:33:15,838 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:15,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:15,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-11-26T10:33:15,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4106): Remote procedure done, pid=13 2024-11-26T10:33:15,845 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-11-26T10:33:15,846 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6640 sec 2024-11-26T10:33:15,850 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees in 1.6810 sec 2024-11-26T10:33:15,908 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6a931c8e80842c8947954ecd8357e9ad 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-26T10:33:15,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 6a931c8e80842c8947954ecd8357e9ad 2024-11-26T10:33:15,911 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=A 2024-11-26T10:33:15,911 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:15,911 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=B 2024-11-26T10:33:15,912 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:15,912 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=C 2024-11-26T10:33:15,912 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:15,926 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/70ad99ed8b554192bbe90cf5558280d3 is 50, key is test_row_0/A:col10/1732617195585/Put/seqid=0 2024-11-26T10:33:15,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741845_1021 (size=14341) 2024-11-26T10:33:16,024 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:16,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617256018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:16,025 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:16,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617256018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:16,029 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:16,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617256021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:16,030 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:16,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617256022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:16,030 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:16,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33248 deadline: 1732617256024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:16,131 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:16,133 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:16,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617256128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:16,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617256128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:16,135 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:16,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617256132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:16,136 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:16,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617256133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:16,137 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:16,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33248 deadline: 1732617256133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:16,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-26T10:33:16,288 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 12 completed 2024-11-26T10:33:16,292 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-26T10:33:16,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees 2024-11-26T10:33:16,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-26T10:33:16,295 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-26T10:33:16,297 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-26T10:33:16,297 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-26T10:33:16,340 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:16,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617256338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:16,341 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:16,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617256338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:16,344 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:16,344 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:16,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617256339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:16,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617256339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:16,345 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:16,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33248 deadline: 1732617256340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:16,347 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/70ad99ed8b554192bbe90cf5558280d3 2024-11-26T10:33:16,369 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/a3e0935f3b44466c9f28d890e0f5adfb is 50, key is test_row_0/B:col10/1732617195585/Put/seqid=0 2024-11-26T10:33:16,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741846_1022 (size=12001) 2024-11-26T10:33:16,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-26T10:33:16,451 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:16,452 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-26T10:33:16,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 
2024-11-26T10:33:16,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. as already flushing 2024-11-26T10:33:16,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:16,453 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:16,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:33:16,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:16,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-26T10:33:16,607 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:16,607 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-26T10:33:16,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 
2024-11-26T10:33:16,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. as already flushing 2024-11-26T10:33:16,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:16,609 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:16,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:33:16,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:16,645 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:16,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617256644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:16,647 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:16,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617256644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:16,649 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:16,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617256648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:16,653 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:16,653 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:16,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33248 deadline: 1732617256650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:16,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617256649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:16,765 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:16,766 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-26T10:33:16,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:16,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. as already flushing 2024-11-26T10:33:16,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:16,766 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:16,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:16,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:16,799 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/a3e0935f3b44466c9f28d890e0f5adfb 2024-11-26T10:33:16,822 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/b8ded2282a824f279bd554ad8b5b596e is 50, key is test_row_0/C:col10/1732617195585/Put/seqid=0 2024-11-26T10:33:16,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741847_1023 (size=12001) 2024-11-26T10:33:16,847 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/b8ded2282a824f279bd554ad8b5b596e 2024-11-26T10:33:16,862 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/70ad99ed8b554192bbe90cf5558280d3 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/70ad99ed8b554192bbe90cf5558280d3 2024-11-26T10:33:16,878 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/70ad99ed8b554192bbe90cf5558280d3, entries=200, sequenceid=52, filesize=14.0 K 2024-11-26T10:33:16,880 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/a3e0935f3b44466c9f28d890e0f5adfb as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/a3e0935f3b44466c9f28d890e0f5adfb 2024-11-26T10:33:16,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-26T10:33:16,903 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/a3e0935f3b44466c9f28d890e0f5adfb, entries=150, sequenceid=52, filesize=11.7 K 2024-11-26T10:33:16,907 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/b8ded2282a824f279bd554ad8b5b596e as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/b8ded2282a824f279bd554ad8b5b596e 2024-11-26T10:33:16,920 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:16,921 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-26T10:33:16,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:16,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. as already flushing 2024-11-26T10:33:16,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:16,922 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:16,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:16,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:16,927 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/b8ded2282a824f279bd554ad8b5b596e, entries=150, sequenceid=52, filesize=11.7 K 2024-11-26T10:33:16,929 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 6a931c8e80842c8947954ecd8357e9ad in 1021ms, sequenceid=52, compaction requested=true 2024-11-26T10:33:16,929 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:16,932 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a931c8e80842c8947954ecd8357e9ad:A, priority=-2147483648, current under compaction store size is 1 2024-11-26T10:33:16,932 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:33:16,932 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a931c8e80842c8947954ecd8357e9ad:B, priority=-2147483648, current under compaction store size is 2 2024-11-26T10:33:16,933 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:33:16,933 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a931c8e80842c8947954ecd8357e9ad:C, priority=-2147483648, current under compaction store size is 3 2024-11-26T10:33:16,933 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: 
MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:33:16,933 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:33:16,934 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:33:16,937 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:33:16,939 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): 6a931c8e80842c8947954ecd8357e9ad/B is initiating minor compaction (all files) 2024-11-26T10:33:16,939 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a931c8e80842c8947954ecd8357e9ad/B in TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:16,940 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/b10d86ffa79e4c53a6b807ac43c6b78c, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/0c9ce492bfd34091806038446f5d10a0, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/a3e0935f3b44466c9f28d890e0f5adfb] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp, totalSize=32.9 K 2024-11-26T10:33:16,941 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting b10d86ffa79e4c53a6b807ac43c6b78c, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732617194180 2024-11-26T10:33:16,942 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35999 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:33:16,942 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 0c9ce492bfd34091806038446f5d10a0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732617194371 2024-11-26T10:33:16,943 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): 6a931c8e80842c8947954ecd8357e9ad/A is initiating minor compaction (all files) 2024-11-26T10:33:16,944 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting a3e0935f3b44466c9f28d890e0f5adfb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732617195582 2024-11-26T10:33:16,944 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a931c8e80842c8947954ecd8357e9ad/A in TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 
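Note on the compaction selection above: "Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking" is the region server's compaction policy sizing up each store's HFiles right after the flush, and the "16 blocking" figure appears to correspond to the hbase.hstore.blockingStoreFiles limit. As an illustration only, the sketch below shows how such thresholds are commonly set on a Configuration before bringing up a (mini-)cluster; the class name and the specific values are assumptions for the example, not settings read from this log.

    // Illustrative sketch only: tuning compaction thresholds on a Configuration.
    // The values below are assumptions chosen to mirror the log output above,
    // not the actual settings used by this TestAcidGuarantees run.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            conf.setInt("hbase.hstore.compaction.min", 3);      // files needed before a minor compaction is considered
            conf.setInt("hbase.hstore.compaction.max", 10);     // cap on files merged in a single compaction
            conf.setInt("hbase.hstore.blockingStoreFiles", 16); // the "16 blocking" threshold seen in the log
            // conf would then be handed to the mini-cluster or Connection used by the test.
        }
    }
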
2024-11-26T10:33:16,944 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/57d5077aa3ab4572bd1036d9e6ae3016, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/8be99741b59e4581a589a6f6632be054, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/70ad99ed8b554192bbe90cf5558280d3] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp, totalSize=35.2 K 2024-11-26T10:33:16,946 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 57d5077aa3ab4572bd1036d9e6ae3016, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732617194180 2024-11-26T10:33:16,951 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8be99741b59e4581a589a6f6632be054, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732617194371 2024-11-26T10:33:16,952 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 70ad99ed8b554192bbe90cf5558280d3, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732617195582 2024-11-26T10:33:16,987 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a931c8e80842c8947954ecd8357e9ad#B#compaction#9 average throughput is 0.47 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:33:16,988 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/11b06e87694b4c46a8ef4ac327afd461 is 50, key is test_row_0/B:col10/1732617195585/Put/seqid=0 2024-11-26T10:33:16,999 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a931c8e80842c8947954ecd8357e9ad#A#compaction#10 average throughput is 0.66 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:33:17,001 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/2ff3cb97124c4d0d8101e3bee5de081f is 50, key is test_row_0/A:col10/1732617195585/Put/seqid=0 2024-11-26T10:33:17,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741848_1024 (size=12104) 2024-11-26T10:33:17,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741849_1025 (size=12104) 2024-11-26T10:33:17,041 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/11b06e87694b4c46a8ef4ac327afd461 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/11b06e87694b4c46a8ef4ac327afd461 2024-11-26T10:33:17,047 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/2ff3cb97124c4d0d8101e3bee5de081f as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/2ff3cb97124c4d0d8101e3bee5de081f 2024-11-26T10:33:17,069 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6a931c8e80842c8947954ecd8357e9ad/B of 6a931c8e80842c8947954ecd8357e9ad into 11b06e87694b4c46a8ef4ac327afd461(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:33:17,069 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:17,069 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad., storeName=6a931c8e80842c8947954ecd8357e9ad/B, priority=13, startTime=1732617196932; duration=0sec 2024-11-26T10:33:17,070 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:33:17,070 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a931c8e80842c8947954ecd8357e9ad:B 2024-11-26T10:33:17,070 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:33:17,071 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6a931c8e80842c8947954ecd8357e9ad/A of 6a931c8e80842c8947954ecd8357e9ad into 2ff3cb97124c4d0d8101e3bee5de081f(size=11.8 K), total size for store is 11.8 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:33:17,071 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:17,071 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad., storeName=6a931c8e80842c8947954ecd8357e9ad/A, priority=13, startTime=1732617196932; duration=0sec 2024-11-26T10:33:17,072 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:33:17,072 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a931c8e80842c8947954ecd8357e9ad:A 2024-11-26T10:33:17,074 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:33:17,074 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): 6a931c8e80842c8947954ecd8357e9ad/C is initiating minor compaction (all files) 2024-11-26T10:33:17,074 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a931c8e80842c8947954ecd8357e9ad/C in TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:17,074 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/398abb2cc49d48718b6d75448f7cda1d, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/e808e7ecb9e140b495c66e945bf9f70f, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/b8ded2282a824f279bd554ad8b5b596e] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp, totalSize=32.9 K 2024-11-26T10:33:17,075 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 398abb2cc49d48718b6d75448f7cda1d, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732617194180 2024-11-26T10:33:17,076 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:17,077 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting e808e7ecb9e140b495c66e945bf9f70f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732617194371 2024-11-26T10:33:17,077 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-26T10:33:17,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] 
regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:17,077 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2837): Flushing 6a931c8e80842c8947954ecd8357e9ad 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-26T10:33:17,077 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting b8ded2282a824f279bd554ad8b5b596e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732617195582 2024-11-26T10:33:17,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=A 2024-11-26T10:33:17,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:17,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=B 2024-11-26T10:33:17,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:17,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=C 2024-11-26T10:33:17,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:17,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/5da30acb7eba43a9aa588fa08641de0d is 50, key is test_row_0/A:col10/1732617196018/Put/seqid=0 2024-11-26T10:33:17,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741850_1026 (size=12001) 2024-11-26T10:33:17,111 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/5da30acb7eba43a9aa588fa08641de0d 2024-11-26T10:33:17,123 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a931c8e80842c8947954ecd8357e9ad#C#compaction#12 average throughput is 0.82 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:33:17,124 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/f3be9e6d81134218a818f3f35e0aff68 is 50, key is test_row_0/C:col10/1732617195585/Put/seqid=0 2024-11-26T10:33:17,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/b5fecc90d3db44dea7c24b94338482c9 is 50, key is test_row_0/B:col10/1732617196018/Put/seqid=0 2024-11-26T10:33:17,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741851_1027 (size=12104) 2024-11-26T10:33:17,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 6a931c8e80842c8947954ecd8357e9ad 2024-11-26T10:33:17,161 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. as already flushing 2024-11-26T10:33:17,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741852_1028 (size=12001) 2024-11-26T10:33:17,172 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/b5fecc90d3db44dea7c24b94338482c9 2024-11-26T10:33:17,184 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/f3be9e6d81134218a818f3f35e0aff68 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/f3be9e6d81134218a818f3f35e0aff68 2024-11-26T10:33:17,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/4f1af6f0d83d4affa902d16a6112a0a9 is 50, key is test_row_0/C:col10/1732617196018/Put/seqid=0 2024-11-26T10:33:17,206 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6a931c8e80842c8947954ecd8357e9ad/C of 6a931c8e80842c8947954ecd8357e9ad into f3be9e6d81134218a818f3f35e0aff68(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-26T10:33:17,206 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:17,206 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad., storeName=6a931c8e80842c8947954ecd8357e9ad/C, priority=13, startTime=1732617196933; duration=0sec 2024-11-26T10:33:17,207 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:33:17,207 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a931c8e80842c8947954ecd8357e9ad:C 2024-11-26T10:33:17,209 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:17,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617257188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:17,210 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:17,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617257191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:17,211 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:17,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617257193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:17,212 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:17,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33248 deadline: 1732617257197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:17,213 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:17,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617257199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:17,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741853_1029 (size=12001) 2024-11-26T10:33:17,323 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:17,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617257312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:17,324 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:17,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617257312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:17,325 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:17,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617257313, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:17,326 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:17,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33248 deadline: 1732617257314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:17,327 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:17,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617257315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:17,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-26T10:33:17,526 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:17,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617257526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:17,529 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:17,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617257528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:17,533 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:17,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617257529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:17,535 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:17,535 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:17,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33248 deadline: 1732617257530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:17,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617257530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:17,618 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/4f1af6f0d83d4affa902d16a6112a0a9 2024-11-26T10:33:17,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/5da30acb7eba43a9aa588fa08641de0d as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/5da30acb7eba43a9aa588fa08641de0d 2024-11-26T10:33:17,651 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/5da30acb7eba43a9aa588fa08641de0d, entries=150, sequenceid=76, filesize=11.7 K 2024-11-26T10:33:17,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/b5fecc90d3db44dea7c24b94338482c9 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/b5fecc90d3db44dea7c24b94338482c9 2024-11-26T10:33:17,667 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/b5fecc90d3db44dea7c24b94338482c9, entries=150, sequenceid=76, filesize=11.7 K 2024-11-26T10:33:17,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/4f1af6f0d83d4affa902d16a6112a0a9 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/4f1af6f0d83d4affa902d16a6112a0a9 2024-11-26T10:33:17,699 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/4f1af6f0d83d4affa902d16a6112a0a9, entries=150, sequenceid=76, filesize=11.7 K 2024-11-26T10:33:17,702 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 6a931c8e80842c8947954ecd8357e9ad in 624ms, sequenceid=76, compaction requested=false 2024-11-26T10:33:17,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:17,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 
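
The repeated RegionTooBusyException entries above are HRegion.checkResources rejecting writes while this region's memstore sits above its blocking limit; the flush that has just committed files for stores A, B and C is what eventually frees that memory. The 512.0 K figure in the message is the region's blocking memstore size, which is normally the configured memstore flush size multiplied by hbase.hregion.memstore.block.multiplier, so this test is evidently running with a deliberately tiny flush size. For illustration only, a minimal client-side sketch of retrying a Put with backoff when the region reports it is too busy follows: the class name, retry budget and sleep values are assumptions, the row/column values are taken from the log, and in practice the stock HBase client already retries this condition internally, so tuning its retry settings is usually preferable to a manual loop like this.

// Minimal sketch (not the test's code): back off and retry a Put that the server
// rejects with RegionTooBusyException, as seen throughout the log above.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {                       // hypothetical class name
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_1"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;                                 // assumed initial pause
      for (int attempt = 1; attempt <= 5; attempt++) {      // assumed retry budget
        try {
          table.put(put);                                   // may be rejected under memstore pressure
          return;                                           // write accepted
        } catch (RegionTooBusyException busy) {
          // Same condition as "Over memstore limit=512.0 K" above:
          // wait for the in-flight flush to drain the memstore, then try again.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
      throw new IOException("region stayed too busy after 5 attempts");
    }
  }
}
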
2024-11-26T10:33:17,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-11-26T10:33:17,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4106): Remote procedure done, pid=15 2024-11-26T10:33:17,707 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14 2024-11-26T10:33:17,708 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4070 sec 2024-11-26T10:33:17,712 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees in 1.4180 sec 2024-11-26T10:33:17,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 6a931c8e80842c8947954ecd8357e9ad 2024-11-26T10:33:17,837 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6a931c8e80842c8947954ecd8357e9ad 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-26T10:33:17,837 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=A 2024-11-26T10:33:17,838 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:17,838 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=B 2024-11-26T10:33:17,838 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:17,838 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=C 2024-11-26T10:33:17,838 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:17,848 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/565980643b87436aabc58c9e36403499 is 50, key is test_row_1/A:col10/1732617197833/Put/seqid=0 2024-11-26T10:33:17,878 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:17,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617257873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:17,885 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:17,885 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:17,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33248 deadline: 1732617257876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:17,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617257877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:17,886 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:17,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617257878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:17,886 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:17,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617257880, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:17,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741854_1030 (size=9657) 2024-11-26T10:33:17,891 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/565980643b87436aabc58c9e36403499 2024-11-26T10:33:17,911 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/c43165e599014bbeba7d8a1758ee1b74 is 50, key is test_row_1/B:col10/1732617197833/Put/seqid=0 2024-11-26T10:33:17,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741855_1031 (size=9657) 2024-11-26T10:33:17,936 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/c43165e599014bbeba7d8a1758ee1b74 2024-11-26T10:33:17,953 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/1c0333ef61dc49cb8ce8fa3826c53d88 is 50, key is test_row_1/C:col10/1732617197833/Put/seqid=0 2024-11-26T10:33:17,980 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741856_1032 (size=9657) 2024-11-26T10:33:17,982 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/1c0333ef61dc49cb8ce8fa3826c53d88 2024-11-26T10:33:17,985 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:17,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617257981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:17,994 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:17,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617257988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:17,995 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:17,995 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:17,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33248 deadline: 1732617257988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:17,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617257988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:17,996 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:17,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617257989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:17,998 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/565980643b87436aabc58c9e36403499 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/565980643b87436aabc58c9e36403499 2024-11-26T10:33:18,011 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/565980643b87436aabc58c9e36403499, entries=100, sequenceid=93, filesize=9.4 K 2024-11-26T10:33:18,015 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/c43165e599014bbeba7d8a1758ee1b74 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/c43165e599014bbeba7d8a1758ee1b74 2024-11-26T10:33:18,030 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/c43165e599014bbeba7d8a1758ee1b74, entries=100, sequenceid=93, filesize=9.4 K 2024-11-26T10:33:18,033 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/1c0333ef61dc49cb8ce8fa3826c53d88 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/1c0333ef61dc49cb8ce8fa3826c53d88 2024-11-26T10:33:18,046 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/1c0333ef61dc49cb8ce8fa3826c53d88, entries=100, sequenceid=93, filesize=9.4 K 2024-11-26T10:33:18,049 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=127.47 
KB/130530 for 6a931c8e80842c8947954ecd8357e9ad in 211ms, sequenceid=93, compaction requested=true 2024-11-26T10:33:18,049 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:18,050 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a931c8e80842c8947954ecd8357e9ad:A, priority=-2147483648, current under compaction store size is 1 2024-11-26T10:33:18,050 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:33:18,050 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:33:18,050 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:33:18,050 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a931c8e80842c8947954ecd8357e9ad:B, priority=-2147483648, current under compaction store size is 2 2024-11-26T10:33:18,050 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:33:18,050 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a931c8e80842c8947954ecd8357e9ad:C, priority=-2147483648, current under compaction store size is 3 2024-11-26T10:33:18,051 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:33:18,052 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33762 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:33:18,053 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): 6a931c8e80842c8947954ecd8357e9ad/B is initiating minor compaction (all files) 2024-11-26T10:33:18,053 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a931c8e80842c8947954ecd8357e9ad/B in TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 
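
The compaction lines above show MemStoreFlusher queueing stores A, B and C for compaction and ExploringCompactionPolicy selecting all three store files (33762 bytes total) after considering a single permutation, with the B-store compaction starting first. In simplified terms, a candidate set passes the policy's ratio test only when every file in it is no larger than the combined size of the other files times a configurable ratio; among passing sets the policy prefers more files and a smaller total size. The sketch below illustrates just that ratio test and is not HBase's actual implementation; the 1.2 ratio and the exact per-file split of the 33762-byte total are assumed for the example.

// Simplified illustration of the ratio test behind "Exploring compaction algorithm has
// selected 3 files of size 33762" above; not HBase code, ratio value assumed.
import java.util.List;

public class CompactionRatioCheck {                          // hypothetical class name
  /** A selection is in ratio when every file is <= (sum of the other files) * ratio. */
  static boolean isInRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > (total - size) * ratio) {
        return false;                                        // one oversized file breaks the set
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Roughly the three store files from the log (11.8 K, 11.7 K and 9.4 K, 33762 bytes total).
    List<Long> candidate = List.of(12_084L, 12_021L, 9_657L);
    System.out.println(isInRatio(candidate, 1.2));           // true: all three compact together
  }
}
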
2024-11-26T10:33:18,053 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/11b06e87694b4c46a8ef4ac327afd461, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/b5fecc90d3db44dea7c24b94338482c9, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/c43165e599014bbeba7d8a1758ee1b74] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp, totalSize=33.0 K 2024-11-26T10:33:18,054 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33762 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:33:18,054 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): 6a931c8e80842c8947954ecd8357e9ad/A is initiating minor compaction (all files) 2024-11-26T10:33:18,054 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a931c8e80842c8947954ecd8357e9ad/A in TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:18,054 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/2ff3cb97124c4d0d8101e3bee5de081f, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/5da30acb7eba43a9aa588fa08641de0d, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/565980643b87436aabc58c9e36403499] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp, totalSize=33.0 K 2024-11-26T10:33:18,055 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 11b06e87694b4c46a8ef4ac327afd461, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732617195582 2024-11-26T10:33:18,055 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2ff3cb97124c4d0d8101e3bee5de081f, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732617195582 2024-11-26T10:33:18,055 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting b5fecc90d3db44dea7c24b94338482c9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1732617196018 2024-11-26T10:33:18,056 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting c43165e599014bbeba7d8a1758ee1b74, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732617197833 2024-11-26T10:33:18,056 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 5da30acb7eba43a9aa588fa08641de0d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1732617196018 2024-11-26T10:33:18,058 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 565980643b87436aabc58c9e36403499, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732617197833 2024-11-26T10:33:18,090 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a931c8e80842c8947954ecd8357e9ad#A#compaction#18 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:33:18,090 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/46562d5f369a4a48a966a4bc6df5a179 is 50, key is test_row_0/A:col10/1732617196018/Put/seqid=0 2024-11-26T10:33:18,099 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a931c8e80842c8947954ecd8357e9ad#B#compaction#19 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:33:18,099 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/3dcbf60bd3c54ca3af25e8d89519ee59 is 50, key is test_row_0/B:col10/1732617196018/Put/seqid=0 2024-11-26T10:33:18,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741857_1033 (size=12207) 2024-11-26T10:33:18,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741858_1034 (size=12207) 2024-11-26T10:33:18,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 6a931c8e80842c8947954ecd8357e9ad 2024-11-26T10:33:18,193 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6a931c8e80842c8947954ecd8357e9ad 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-26T10:33:18,193 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=A 2024-11-26T10:33:18,193 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:18,193 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=B 2024-11-26T10:33:18,193 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:18,193 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=C 2024-11-26T10:33:18,194 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:18,210 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/6ac301cb95de4d4ab6358e9f760a69e9 is 50, key is test_row_0/A:col10/1732617197875/Put/seqid=0 2024-11-26T10:33:18,223 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:18,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617258214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:18,224 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:18,224 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:18,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617258219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:18,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617258214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:18,225 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:18,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33248 deadline: 1732617258219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:18,226 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:18,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617258223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:18,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741859_1035 (size=12001) 2024-11-26T10:33:18,237 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/6ac301cb95de4d4ab6358e9f760a69e9 2024-11-26T10:33:18,255 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/2b626ef2efba41439ab483dd9b543ba9 is 50, key is test_row_0/B:col10/1732617197875/Put/seqid=0 2024-11-26T10:33:18,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741860_1036 (size=12001) 2024-11-26T10:33:18,273 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/2b626ef2efba41439ab483dd9b543ba9 2024-11-26T10:33:18,293 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/42976dfb274049108aa5da1aeb31a7f9 is 50, key is test_row_0/C:col10/1732617197875/Put/seqid=0 2024-11-26T10:33:18,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741861_1037 (size=12001) 2024-11-26T10:33:18,315 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/42976dfb274049108aa5da1aeb31a7f9 2024-11-26T10:33:18,327 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:18,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617258327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:18,331 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:18,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617258327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:18,332 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:18,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33248 deadline: 1732617258328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:18,333 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/6ac301cb95de4d4ab6358e9f760a69e9 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/6ac301cb95de4d4ab6358e9f760a69e9 2024-11-26T10:33:18,333 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:18,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617258328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:18,334 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:18,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617258328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:18,345 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/6ac301cb95de4d4ab6358e9f760a69e9, entries=150, sequenceid=116, filesize=11.7 K 2024-11-26T10:33:18,348 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/2b626ef2efba41439ab483dd9b543ba9 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/2b626ef2efba41439ab483dd9b543ba9 2024-11-26T10:33:18,360 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/2b626ef2efba41439ab483dd9b543ba9, entries=150, sequenceid=116, filesize=11.7 K 2024-11-26T10:33:18,363 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/42976dfb274049108aa5da1aeb31a7f9 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/42976dfb274049108aa5da1aeb31a7f9 2024-11-26T10:33:18,374 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/42976dfb274049108aa5da1aeb31a7f9, entries=150, sequenceid=116, filesize=11.7 K 2024-11-26T10:33:18,377 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 6a931c8e80842c8947954ecd8357e9ad in 184ms, sequenceid=116, compaction requested=true 2024-11-26T10:33:18,377 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:18,378 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a931c8e80842c8947954ecd8357e9ad:A, priority=-2147483648, current under compaction store size is 3 2024-11-26T10:33:18,378 
DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-26T10:33:18,378 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a931c8e80842c8947954ecd8357e9ad:B, priority=-2147483648, current under compaction store size is 3 2024-11-26T10:33:18,378 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-26T10:33:18,378 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a931c8e80842c8947954ecd8357e9ad:C, priority=-2147483648, current under compaction store size is 3 2024-11-26T10:33:18,378 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=4), splitQueue=0 2024-11-26T10:33:18,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-26T10:33:18,404 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 14 completed 2024-11-26T10:33:18,408 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-26T10:33:18,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees 2024-11-26T10:33:18,411 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-26T10:33:18,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-26T10:33:18,412 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-26T10:33:18,413 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-26T10:33:18,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-26T10:33:18,520 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/46562d5f369a4a48a966a4bc6df5a179 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/46562d5f369a4a48a966a4bc6df5a179 2024-11-26T10:33:18,530 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/3dcbf60bd3c54ca3af25e8d89519ee59 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/3dcbf60bd3c54ca3af25e8d89519ee59 2024-11-26T10:33:18,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 6a931c8e80842c8947954ecd8357e9ad 2024-11-26T10:33:18,534 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6a931c8e80842c8947954ecd8357e9ad 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-26T10:33:18,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=A 2024-11-26T10:33:18,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:18,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=B 2024-11-26T10:33:18,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:18,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=C 2024-11-26T10:33:18,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:18,545 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/0369380f4cd14e769c15403052a6e651 is 50, key is test_row_0/A:col10/1732617198220/Put/seqid=0 2024-11-26T10:33:18,548 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6a931c8e80842c8947954ecd8357e9ad/B of 6a931c8e80842c8947954ecd8357e9ad into 3dcbf60bd3c54ca3af25e8d89519ee59(size=11.9 K), total size for store is 23.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-26T10:33:18,549 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:18,549 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad., storeName=6a931c8e80842c8947954ecd8357e9ad/B, priority=13, startTime=1732617198050; duration=0sec 2024-11-26T10:33:18,549 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=4), splitQueue=0 2024-11-26T10:33:18,549 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a931c8e80842c8947954ecd8357e9ad:B 2024-11-26T10:33:18,550 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 3 compacting, 1 eligible, 16 blocking 2024-11-26T10:33:18,550 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-26T10:33:18,551 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-26T10:33:18,551 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. because compaction request was cancelled 2024-11-26T10:33:18,551 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a931c8e80842c8947954ecd8357e9ad:A 2024-11-26T10:33:18,551 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-26T10:33:18,551 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6a931c8e80842c8947954ecd8357e9ad/A of 6a931c8e80842c8947954ecd8357e9ad into 46562d5f369a4a48a966a4bc6df5a179(size=11.9 K), total size for store is 23.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-26T10:33:18,551 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:18,552 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad., storeName=6a931c8e80842c8947954ecd8357e9ad/A, priority=13, startTime=1732617198050; duration=0sec 2024-11-26T10:33:18,552 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-26T10:33:18,552 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a931c8e80842c8947954ecd8357e9ad:A 2024-11-26T10:33:18,552 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a931c8e80842c8947954ecd8357e9ad:B 2024-11-26T10:33:18,553 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 45763 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-26T10:33:18,553 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): 6a931c8e80842c8947954ecd8357e9ad/C is initiating minor compaction (all files) 2024-11-26T10:33:18,553 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a931c8e80842c8947954ecd8357e9ad/C in TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:18,553 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 4 compacting, 0 eligible, 16 blocking 2024-11-26T10:33:18,553 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-26T10:33:18,553 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-26T10:33:18,553 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 
because compaction request was cancelled 2024-11-26T10:33:18,553 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a931c8e80842c8947954ecd8357e9ad:C 2024-11-26T10:33:18,554 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-26T10:33:18,554 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/f3be9e6d81134218a818f3f35e0aff68, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/4f1af6f0d83d4affa902d16a6112a0a9, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/1c0333ef61dc49cb8ce8fa3826c53d88, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/42976dfb274049108aa5da1aeb31a7f9] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp, totalSize=44.7 K 2024-11-26T10:33:18,556 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-26T10:33:18,556 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-26T10:33:18,556 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting f3be9e6d81134218a818f3f35e0aff68, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732617195582 2024-11-26T10:33:18,556 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 
because compaction request was cancelled 2024-11-26T10:33:18,557 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a931c8e80842c8947954ecd8357e9ad:B 2024-11-26T10:33:18,557 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 4f1af6f0d83d4affa902d16a6112a0a9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1732617196018 2024-11-26T10:33:18,558 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 1c0333ef61dc49cb8ce8fa3826c53d88, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732617197833 2024-11-26T10:33:18,558 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 42976dfb274049108aa5da1aeb31a7f9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732617197875 2024-11-26T10:33:18,568 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:18,569 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-26T10:33:18,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:18,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. as already flushing 2024-11-26T10:33:18,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:18,569 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:33:18,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:18,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:18,588 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:18,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617258578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:18,588 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:18,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617258578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:18,589 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:18,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617258580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:18,590 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:18,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33248 deadline: 1732617258581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:18,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741862_1038 (size=14391) 2024-11-26T10:33:18,591 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/0369380f4cd14e769c15403052a6e651 2024-11-26T10:33:18,593 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:18,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617258588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:18,609 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a931c8e80842c8947954ecd8357e9ad#C#compaction#24 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:33:18,610 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/369e824438d6472b8bd73d75037d37e8 is 50, key is test_row_0/C:col10/1732617197875/Put/seqid=0 2024-11-26T10:33:18,622 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/17a20d9859f04082b4547325515ac69d is 50, key is test_row_0/B:col10/1732617198220/Put/seqid=0 2024-11-26T10:33:18,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741863_1039 (size=12241) 2024-11-26T10:33:18,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741864_1040 (size=12051) 2024-11-26T10:33:18,652 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/17a20d9859f04082b4547325515ac69d 2024-11-26T10:33:18,670 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/e66901f60da3432d84657cf6f1a5ca37 is 50, key is test_row_0/C:col10/1732617198220/Put/seqid=0 2024-11-26T10:33:18,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741865_1041 (size=12051) 2024-11-26T10:33:18,686 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=130 (bloomFilter=true), 
to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/e66901f60da3432d84657cf6f1a5ca37 2024-11-26T10:33:18,691 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:18,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617258691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:18,694 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:18,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617258691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:18,695 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:18,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33248 deadline: 1732617258692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:18,696 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:18,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617258692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:18,700 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:18,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617258697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:18,703 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/0369380f4cd14e769c15403052a6e651 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/0369380f4cd14e769c15403052a6e651 2024-11-26T10:33:18,715 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/0369380f4cd14e769c15403052a6e651, entries=200, sequenceid=130, filesize=14.1 K 2024-11-26T10:33:18,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-26T10:33:18,717 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/17a20d9859f04082b4547325515ac69d as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/17a20d9859f04082b4547325515ac69d 2024-11-26T10:33:18,723 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:18,724 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-26T10:33:18,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:18,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. as already flushing 2024-11-26T10:33:18,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 
2024-11-26T10:33:18,725 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:18,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:18,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:18,734 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/17a20d9859f04082b4547325515ac69d, entries=150, sequenceid=130, filesize=11.8 K 2024-11-26T10:33:18,736 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/e66901f60da3432d84657cf6f1a5ca37 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/e66901f60da3432d84657cf6f1a5ca37 2024-11-26T10:33:18,748 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/e66901f60da3432d84657cf6f1a5ca37, entries=150, sequenceid=130, filesize=11.8 K 2024-11-26T10:33:18,749 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 6a931c8e80842c8947954ecd8357e9ad in 215ms, sequenceid=130, compaction requested=true 2024-11-26T10:33:18,749 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:18,749 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:33:18,750 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a931c8e80842c8947954ecd8357e9ad:A, priority=-2147483648, current under compaction store 
size is 1 2024-11-26T10:33:18,751 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38599 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:33:18,751 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): 6a931c8e80842c8947954ecd8357e9ad/A is initiating minor compaction (all files) 2024-11-26T10:33:18,751 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a931c8e80842c8947954ecd8357e9ad/A in TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:18,752 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/46562d5f369a4a48a966a4bc6df5a179, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/6ac301cb95de4d4ab6358e9f760a69e9, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/0369380f4cd14e769c15403052a6e651] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp, totalSize=37.7 K 2024-11-26T10:33:18,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:33:18,752 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 46562d5f369a4a48a966a4bc6df5a179, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732617196018 2024-11-26T10:33:18,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a931c8e80842c8947954ecd8357e9ad:B, priority=-2147483648, current under compaction store size is 2 2024-11-26T10:33:18,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:33:18,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a931c8e80842c8947954ecd8357e9ad:C, priority=-2147483648, current under compaction store size is 3 2024-11-26T10:33:18,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-26T10:33:18,753 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6ac301cb95de4d4ab6358e9f760a69e9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732617197875 2024-11-26T10:33:18,754 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0369380f4cd14e769c15403052a6e651, keycount=200, bloomtype=ROW, size=14.1 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732617198214 2024-11-26T10:33:18,778 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
6a931c8e80842c8947954ecd8357e9ad#A#compaction#27 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:33:18,779 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/d531626397d9487d9c107443fb560921 is 50, key is test_row_0/A:col10/1732617198220/Put/seqid=0 2024-11-26T10:33:18,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741866_1042 (size=12359) 2024-11-26T10:33:18,802 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/d531626397d9487d9c107443fb560921 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/d531626397d9487d9c107443fb560921 2024-11-26T10:33:18,818 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6a931c8e80842c8947954ecd8357e9ad/A of 6a931c8e80842c8947954ecd8357e9ad into d531626397d9487d9c107443fb560921(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:33:18,818 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:18,818 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad., storeName=6a931c8e80842c8947954ecd8357e9ad/A, priority=13, startTime=1732617198749; duration=0sec 2024-11-26T10:33:18,819 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-26T10:33:18,819 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a931c8e80842c8947954ecd8357e9ad:A 2024-11-26T10:33:18,819 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 4 compacting, 1 eligible, 16 blocking 2024-11-26T10:33:18,821 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-26T10:33:18,821 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-26T10:33:18,821 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 
because compaction request was cancelled 2024-11-26T10:33:18,821 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a931c8e80842c8947954ecd8357e9ad:C 2024-11-26T10:33:18,821 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:33:18,823 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:33:18,823 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): 6a931c8e80842c8947954ecd8357e9ad/B is initiating minor compaction (all files) 2024-11-26T10:33:18,823 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a931c8e80842c8947954ecd8357e9ad/B in TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:18,824 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/3dcbf60bd3c54ca3af25e8d89519ee59, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/2b626ef2efba41439ab483dd9b543ba9, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/17a20d9859f04082b4547325515ac69d] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp, totalSize=35.4 K 2024-11-26T10:33:18,824 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3dcbf60bd3c54ca3af25e8d89519ee59, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732617196018 2024-11-26T10:33:18,825 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2b626ef2efba41439ab483dd9b543ba9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732617197875 2024-11-26T10:33:18,826 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 17a20d9859f04082b4547325515ac69d, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732617198215 2024-11-26T10:33:18,846 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a931c8e80842c8947954ecd8357e9ad#B#compaction#28 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:33:18,847 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/7276c8ccd07a417694c9d7a4342b910c is 50, key is test_row_0/B:col10/1732617198220/Put/seqid=0 2024-11-26T10:33:18,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741867_1043 (size=12359) 2024-11-26T10:33:18,879 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:18,880 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-26T10:33:18,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:18,880 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2837): Flushing 6a931c8e80842c8947954ecd8357e9ad 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-26T10:33:18,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=A 2024-11-26T10:33:18,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:18,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=B 2024-11-26T10:33:18,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:18,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=C 2024-11-26T10:33:18,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:18,888 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/7276c8ccd07a417694c9d7a4342b910c as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/7276c8ccd07a417694c9d7a4342b910c 2024-11-26T10:33:18,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 6a931c8e80842c8947954ecd8357e9ad 2024-11-26T10:33:18,898 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing 
TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. as already flushing 2024-11-26T10:33:18,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/567247a7157949dd9fddc725ab217955 is 50, key is test_row_0/A:col10/1732617198560/Put/seqid=0 2024-11-26T10:33:18,913 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6a931c8e80842c8947954ecd8357e9ad/B of 6a931c8e80842c8947954ecd8357e9ad into 7276c8ccd07a417694c9d7a4342b910c(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:33:18,913 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:18,913 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad., storeName=6a931c8e80842c8947954ecd8357e9ad/B, priority=13, startTime=1732617198752; duration=0sec 2024-11-26T10:33:18,913 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:33:18,913 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a931c8e80842c8947954ecd8357e9ad:B 2024-11-26T10:33:18,923 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:18,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617258917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:18,924 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:18,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617258918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:18,925 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:18,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33248 deadline: 1732617258921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:18,925 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:18,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617258922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:18,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741868_1044 (size=12151) 2024-11-26T10:33:18,929 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/567247a7157949dd9fddc725ab217955 2024-11-26T10:33:18,929 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:18,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617258923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:18,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/9368a3e55623410ea237fbc1f479a08d is 50, key is test_row_0/B:col10/1732617198560/Put/seqid=0 2024-11-26T10:33:18,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741869_1045 (size=12151) 2024-11-26T10:33:18,972 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/9368a3e55623410ea237fbc1f479a08d 2024-11-26T10:33:18,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/a4c067f22df4485a9f25a940e7d5b06a is 50, key is test_row_0/C:col10/1732617198560/Put/seqid=0 2024-11-26T10:33:19,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741870_1046 (size=12151) 2024-11-26T10:33:19,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-26T10:33:19,016 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/a4c067f22df4485a9f25a940e7d5b06a 2024-11-26T10:33:19,028 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:19,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617259025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:19,029 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:19,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617259026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:19,030 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:19,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33248 deadline: 1732617259027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:19,031 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:19,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617259028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:19,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/567247a7157949dd9fddc725ab217955 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/567247a7157949dd9fddc725ab217955 2024-11-26T10:33:19,035 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:19,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617259032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:19,050 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/369e824438d6472b8bd73d75037d37e8 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/369e824438d6472b8bd73d75037d37e8 2024-11-26T10:33:19,062 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6a931c8e80842c8947954ecd8357e9ad/C of 6a931c8e80842c8947954ecd8357e9ad into 369e824438d6472b8bd73d75037d37e8(size=12.0 K), total size for store is 23.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-26T10:33:19,063 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:19,063 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad., storeName=6a931c8e80842c8947954ecd8357e9ad/C, priority=12, startTime=1732617198378; duration=0sec 2024-11-26T10:33:19,063 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:33:19,063 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a931c8e80842c8947954ecd8357e9ad:C 2024-11-26T10:33:19,067 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/567247a7157949dd9fddc725ab217955, entries=150, sequenceid=155, filesize=11.9 K 2024-11-26T10:33:19,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/9368a3e55623410ea237fbc1f479a08d as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/9368a3e55623410ea237fbc1f479a08d 2024-11-26T10:33:19,087 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/9368a3e55623410ea237fbc1f479a08d, entries=150, sequenceid=155, filesize=11.9 K 2024-11-26T10:33:19,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/a4c067f22df4485a9f25a940e7d5b06a as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/a4c067f22df4485a9f25a940e7d5b06a 2024-11-26T10:33:19,104 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/a4c067f22df4485a9f25a940e7d5b06a, entries=150, sequenceid=155, filesize=11.9 K 2024-11-26T10:33:19,106 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 6a931c8e80842c8947954ecd8357e9ad in 226ms, sequenceid=155, compaction requested=true 2024-11-26T10:33:19,106 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:19,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:19,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-11-26T10:33:19,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4106): Remote procedure done, pid=17 2024-11-26T10:33:19,112 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16 2024-11-26T10:33:19,112 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 696 msec 2024-11-26T10:33:19,115 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees in 705 msec 2024-11-26T10:33:19,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 6a931c8e80842c8947954ecd8357e9ad 2024-11-26T10:33:19,234 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6a931c8e80842c8947954ecd8357e9ad 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-26T10:33:19,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=A 2024-11-26T10:33:19,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:19,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=B 2024-11-26T10:33:19,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:19,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=C 2024-11-26T10:33:19,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:19,241 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/46f771c0db9546148b03c9a42c1999d2 is 50, key is test_row_0/A:col10/1732617199231/Put/seqid=0 2024-11-26T10:33:19,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741871_1047 (size=12151) 2024-11-26T10:33:19,309 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:19,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33248 deadline: 1732617259303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:19,309 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:19,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617259303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:19,310 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:19,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617259303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:19,310 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:19,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617259305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:19,310 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:19,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617259306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:19,413 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:19,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33248 deadline: 1732617259411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:19,414 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:19,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617259411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:19,414 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:19,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617259412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:19,415 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:19,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617259413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:19,415 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:19,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617259413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:19,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-26T10:33:19,519 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 16 completed 2024-11-26T10:33:19,521 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-26T10:33:19,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees 2024-11-26T10:33:19,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-26T10:33:19,525 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-26T10:33:19,526 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-26T10:33:19,526 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-26T10:33:19,616 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:19,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33248 deadline: 1732617259615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:19,620 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:19,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617259617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:19,620 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:19,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617259619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:19,621 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:19,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617259619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:19,622 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:19,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617259621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:19,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-26T10:33:19,663 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/46f771c0db9546148b03c9a42c1999d2 2024-11-26T10:33:19,678 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/708018327f13437aa2935e390b795ebb is 50, key is test_row_0/B:col10/1732617199231/Put/seqid=0 2024-11-26T10:33:19,679 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:19,680 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-26T10:33:19,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:19,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 
as already flushing 2024-11-26T10:33:19,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:19,681 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:19,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:19,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:19,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741872_1048 (size=12151) 2024-11-26T10:33:19,688 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/708018327f13437aa2935e390b795ebb 2024-11-26T10:33:19,708 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/f52af82ae0c54109ada088bf62ad123f is 50, key is test_row_0/C:col10/1732617199231/Put/seqid=0 2024-11-26T10:33:19,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741873_1049 (size=12151) 2024-11-26T10:33:19,730 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/f52af82ae0c54109ada088bf62ad123f 2024-11-26T10:33:19,743 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/46f771c0db9546148b03c9a42c1999d2 as 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/46f771c0db9546148b03c9a42c1999d2 2024-11-26T10:33:19,753 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/46f771c0db9546148b03c9a42c1999d2, entries=150, sequenceid=172, filesize=11.9 K 2024-11-26T10:33:19,757 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/708018327f13437aa2935e390b795ebb as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/708018327f13437aa2935e390b795ebb 2024-11-26T10:33:19,769 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/708018327f13437aa2935e390b795ebb, entries=150, sequenceid=172, filesize=11.9 K 2024-11-26T10:33:19,771 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/f52af82ae0c54109ada088bf62ad123f as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/f52af82ae0c54109ada088bf62ad123f 2024-11-26T10:33:19,783 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/f52af82ae0c54109ada088bf62ad123f, entries=150, sequenceid=172, filesize=11.9 K 2024-11-26T10:33:19,784 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 6a931c8e80842c8947954ecd8357e9ad in 551ms, sequenceid=172, compaction requested=true 2024-11-26T10:33:19,784 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:19,785 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a931c8e80842c8947954ecd8357e9ad:A, priority=-2147483648, current under compaction store size is 1 2024-11-26T10:33:19,785 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:33:19,785 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:33:19,785 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a931c8e80842c8947954ecd8357e9ad:B, priority=-2147483648, current under compaction store size is 2 2024-11-26T10:33:19,785 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:33:19,785 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:33:19,785 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a931c8e80842c8947954ecd8357e9ad:C, priority=-2147483648, current under compaction store size is 3 2024-11-26T10:33:19,785 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:33:19,787 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36661 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:33:19,787 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36661 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:33:19,787 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): 6a931c8e80842c8947954ecd8357e9ad/A is initiating minor compaction (all files) 2024-11-26T10:33:19,787 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): 6a931c8e80842c8947954ecd8357e9ad/B is initiating minor compaction (all files) 2024-11-26T10:33:19,787 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a931c8e80842c8947954ecd8357e9ad/A in TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:19,787 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a931c8e80842c8947954ecd8357e9ad/B in TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 
2024-11-26T10:33:19,787 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/d531626397d9487d9c107443fb560921, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/567247a7157949dd9fddc725ab217955, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/46f771c0db9546148b03c9a42c1999d2] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp, totalSize=35.8 K 2024-11-26T10:33:19,787 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/7276c8ccd07a417694c9d7a4342b910c, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/9368a3e55623410ea237fbc1f479a08d, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/708018327f13437aa2935e390b795ebb] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp, totalSize=35.8 K 2024-11-26T10:33:19,789 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting d531626397d9487d9c107443fb560921, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732617198215 2024-11-26T10:33:19,789 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 7276c8ccd07a417694c9d7a4342b910c, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732617198215 2024-11-26T10:33:19,790 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 567247a7157949dd9fddc725ab217955, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1732617198560 2024-11-26T10:33:19,790 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 9368a3e55623410ea237fbc1f479a08d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1732617198560 2024-11-26T10:33:19,790 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 46f771c0db9546148b03c9a42c1999d2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732617198914 2024-11-26T10:33:19,790 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 708018327f13437aa2935e390b795ebb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732617198914 2024-11-26T10:33:19,813 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a931c8e80842c8947954ecd8357e9ad#B#compaction#35 average throughput is 3.28 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:33:19,814 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/44413a28a80a43b589c7ef6822d5cc05 is 50, key is test_row_0/B:col10/1732617199231/Put/seqid=0 2024-11-26T10:33:19,824 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a931c8e80842c8947954ecd8357e9ad#A#compaction#36 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:33:19,825 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/7c7ffa2db9ad46be9618a74fe33f8864 is 50, key is test_row_0/A:col10/1732617199231/Put/seqid=0 2024-11-26T10:33:19,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-26T10:33:19,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741874_1050 (size=12561) 2024-11-26T10:33:19,835 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:19,836 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-26T10:33:19,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 
2024-11-26T10:33:19,836 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing 6a931c8e80842c8947954ecd8357e9ad 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-26T10:33:19,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=A 2024-11-26T10:33:19,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:19,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=B 2024-11-26T10:33:19,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:19,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=C 2024-11-26T10:33:19,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:19,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741875_1051 (size=12561) 2024-11-26T10:33:19,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/a0ef14091e624b8c8192066cc9c8f2a9 is 50, key is test_row_0/A:col10/1732617199305/Put/seqid=0 2024-11-26T10:33:19,863 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/7c7ffa2db9ad46be9618a74fe33f8864 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/7c7ffa2db9ad46be9618a74fe33f8864 2024-11-26T10:33:19,881 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6a931c8e80842c8947954ecd8357e9ad/A of 6a931c8e80842c8947954ecd8357e9ad into 7c7ffa2db9ad46be9618a74fe33f8864(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-26T10:33:19,881 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:19,881 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad., storeName=6a931c8e80842c8947954ecd8357e9ad/A, priority=13, startTime=1732617199785; duration=0sec 2024-11-26T10:33:19,882 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:33:19,882 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a931c8e80842c8947954ecd8357e9ad:A 2024-11-26T10:33:19,882 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-26T10:33:19,885 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48594 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-26T10:33:19,885 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): 6a931c8e80842c8947954ecd8357e9ad/C is initiating minor compaction (all files) 2024-11-26T10:33:19,886 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a931c8e80842c8947954ecd8357e9ad/C in TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:19,886 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/369e824438d6472b8bd73d75037d37e8, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/e66901f60da3432d84657cf6f1a5ca37, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/a4c067f22df4485a9f25a940e7d5b06a, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/f52af82ae0c54109ada088bf62ad123f] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp, totalSize=47.5 K 2024-11-26T10:33:19,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741876_1052 (size=12151) 2024-11-26T10:33:19,887 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 369e824438d6472b8bd73d75037d37e8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732617197875 2024-11-26T10:33:19,888 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting e66901f60da3432d84657cf6f1a5ca37, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732617198215 2024-11-26T10:33:19,890 
DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting a4c067f22df4485a9f25a940e7d5b06a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1732617198560 2024-11-26T10:33:19,890 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting f52af82ae0c54109ada088bf62ad123f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732617198914 2024-11-26T10:33:19,893 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=193 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/a0ef14091e624b8c8192066cc9c8f2a9 2024-11-26T10:33:19,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/1feeae9316774cad9b1afddb2d844685 is 50, key is test_row_0/B:col10/1732617199305/Put/seqid=0 2024-11-26T10:33:19,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 6a931c8e80842c8947954ecd8357e9ad 2024-11-26T10:33:19,921 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. as already flushing 2024-11-26T10:33:19,925 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a931c8e80842c8947954ecd8357e9ad#C#compaction#39 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:33:19,926 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/8c5790d18b9a4f79913bca1faa0afde1 is 50, key is test_row_0/C:col10/1732617199231/Put/seqid=0 2024-11-26T10:33:19,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741877_1053 (size=12151) 2024-11-26T10:33:19,942 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=193 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/1feeae9316774cad9b1afddb2d844685 2024-11-26T10:33:19,944 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:19,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617259939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:19,945 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:19,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617259941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:19,945 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:19,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617259941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:19,946 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:19,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33248 deadline: 1732617259942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:19,949 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:19,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617259943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:19,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741878_1054 (size=12527) 2024-11-26T10:33:19,971 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/8c5790d18b9a4f79913bca1faa0afde1 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/8c5790d18b9a4f79913bca1faa0afde1 2024-11-26T10:33:19,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/543aedca104e48679af3fd3ad13824cf is 50, key is test_row_0/C:col10/1732617199305/Put/seqid=0 2024-11-26T10:33:19,983 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6a931c8e80842c8947954ecd8357e9ad/C of 6a931c8e80842c8947954ecd8357e9ad into 8c5790d18b9a4f79913bca1faa0afde1(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-26T10:33:19,983 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:19,983 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad., storeName=6a931c8e80842c8947954ecd8357e9ad/C, priority=12, startTime=1732617199785; duration=0sec 2024-11-26T10:33:19,984 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:33:19,984 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a931c8e80842c8947954ecd8357e9ad:C 2024-11-26T10:33:19,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741879_1055 (size=12151) 2024-11-26T10:33:20,046 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:20,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617260046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:20,050 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:20,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617260047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:20,050 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:20,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617260048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:20,051 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:20,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33248 deadline: 1732617260048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:20,054 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:20,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617260051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:20,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-26T10:33:20,239 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/44413a28a80a43b589c7ef6822d5cc05 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/44413a28a80a43b589c7ef6822d5cc05 2024-11-26T10:33:20,251 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6a931c8e80842c8947954ecd8357e9ad/B of 6a931c8e80842c8947954ecd8357e9ad into 44413a28a80a43b589c7ef6822d5cc05(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:33:20,251 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:20,251 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad., storeName=6a931c8e80842c8947954ecd8357e9ad/B, priority=13, startTime=1732617199785; duration=0sec 2024-11-26T10:33:20,251 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:33:20,252 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a931c8e80842c8947954ecd8357e9ad:B 2024-11-26T10:33:20,253 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:20,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617260250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:20,254 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:20,254 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:20,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617260252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:20,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617260252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:20,255 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:20,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33248 deadline: 1732617260253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:20,260 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:20,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617260258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:20,399 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=193 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/543aedca104e48679af3fd3ad13824cf 2024-11-26T10:33:20,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/a0ef14091e624b8c8192066cc9c8f2a9 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/a0ef14091e624b8c8192066cc9c8f2a9 2024-11-26T10:33:20,417 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/a0ef14091e624b8c8192066cc9c8f2a9, entries=150, sequenceid=193, filesize=11.9 K 2024-11-26T10:33:20,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/1feeae9316774cad9b1afddb2d844685 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/1feeae9316774cad9b1afddb2d844685 2024-11-26T10:33:20,428 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/1feeae9316774cad9b1afddb2d844685, entries=150, sequenceid=193, filesize=11.9 K 2024-11-26T10:33:20,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/543aedca104e48679af3fd3ad13824cf as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/543aedca104e48679af3fd3ad13824cf 2024-11-26T10:33:20,447 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/543aedca104e48679af3fd3ad13824cf, entries=150, sequenceid=193, filesize=11.9 K 2024-11-26T10:33:20,449 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 6a931c8e80842c8947954ecd8357e9ad in 613ms, sequenceid=193, compaction requested=false 2024-11-26T10:33:20,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:20,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:20,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-11-26T10:33:20,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-11-26T10:33:20,453 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18 2024-11-26T10:33:20,453 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 925 msec 2024-11-26T10:33:20,457 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees in 933 msec 2024-11-26T10:33:20,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 6a931c8e80842c8947954ecd8357e9ad 2024-11-26T10:33:20,562 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6a931c8e80842c8947954ecd8357e9ad 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-26T10:33:20,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=A 2024-11-26T10:33:20,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:20,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=B 2024-11-26T10:33:20,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:20,564 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
6a931c8e80842c8947954ecd8357e9ad, store=C 2024-11-26T10:33:20,564 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:20,571 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/c5c6a89542a6482c88a60625fe465903 is 50, key is test_row_0/A:col10/1732617199941/Put/seqid=0 2024-11-26T10:33:20,589 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:20,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33248 deadline: 1732617260586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:20,592 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:20,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617260587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:20,593 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:20,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617260587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:20,593 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:20,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617260589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:20,594 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:20,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617260589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:20,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741880_1056 (size=14541) 2024-11-26T10:33:20,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-26T10:33:20,629 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 18 completed 2024-11-26T10:33:20,631 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-26T10:33:20,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees 2024-11-26T10:33:20,634 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-26T10:33:20,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-26T10:33:20,636 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-26T10:33:20,636 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-26T10:33:20,693 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:20,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33248 deadline: 1732617260691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:20,697 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:20,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617260695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:20,698 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:20,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617260696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:20,699 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:20,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617260698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:20,700 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:20,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617260698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:20,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-26T10:33:20,788 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:20,788 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-26T10:33:20,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:20,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. as already flushing 2024-11-26T10:33:20,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:20,789 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:33:20,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:20,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:20,898 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:20,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33248 deadline: 1732617260896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:20,902 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:20,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617260900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:20,902 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:20,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617260901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:20,910 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:20,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617260910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:20,911 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:20,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617260910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:20,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-26T10:33:20,942 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:20,942 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-26T10:33:20,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:20,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. as already flushing 2024-11-26T10:33:20,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:20,942 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
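Editor's note: the repeated RegionTooBusyException rejections above come from HRegion.checkResources(): once a region's memstore grows past its blocking threshold, new Mutate calls are refused until a flush brings it back down. The "Over memstore limit=512.0 K" figure is the per-region flush size multiplied by the blocking multiplier; the stock defaults are far larger (128 MB and 4), so the test configuration presumably shrinks the flush size to force this code path. A minimal sketch of how such a limit would be configured; the 128 KB value is an assumption chosen only to reproduce the 512 K limit seen in the log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Per-region memstore flush threshold. The HBase default is 128 MB; a
    // 128 KB value is assumed here purely to match the 512 K limit above.
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
    // Writes are rejected with RegionTooBusyException once the memstore
    // reaches flush.size * block.multiplier (128 KB * 4 = 512 KB).
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
  }
}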
2024-11-26T10:33:20,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:20,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:21,007 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=214 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/c5c6a89542a6482c88a60625fe465903 2024-11-26T10:33:21,018 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/8be353fc75bb4982ae31b8da8e4f2b17 is 50, key is test_row_0/B:col10/1732617199941/Put/seqid=0 2024-11-26T10:33:21,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741881_1057 (size=12151) 2024-11-26T10:33:21,034 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=214 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/8be353fc75bb4982ae31b8da8e4f2b17 2024-11-26T10:33:21,050 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/bab66df00b63458494b13f6245dbc828 is 50, key is test_row_0/C:col10/1732617199941/Put/seqid=0 2024-11-26T10:33:21,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741882_1058 (size=12151) 2024-11-26T10:33:21,075 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=214 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/bab66df00b63458494b13f6245dbc828 2024-11-26T10:33:21,086 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/c5c6a89542a6482c88a60625fe465903 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/c5c6a89542a6482c88a60625fe465903 2024-11-26T10:33:21,095 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/c5c6a89542a6482c88a60625fe465903, entries=200, sequenceid=214, filesize=14.2 K 2024-11-26T10:33:21,097 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:21,098 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 
2024-11-26T10:33:21,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:21,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. as already flushing 2024-11-26T10:33:21,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:21,098 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/8be353fc75bb4982ae31b8da8e4f2b17 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/8be353fc75bb4982ae31b8da8e4f2b17 2024-11-26T10:33:21,098 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:21,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:21,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
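Editor's note: the pid=21 failures above do not indicate data loss. The master keeps re-dispatching a FlushRegionCallable for the region, and each attempt bails out because the region reports it is already flushing ("NOT flushing ... as already flushing"), so the callable throws the IOException that the master then logs as "Remote procedure failed, pid=21". A simplified illustration of that guard, using hypothetical types and method names rather than the actual HBase source:

import java.io.IOException;

// Hypothetical stand-in for the region being flushed; only the pieces needed
// to illustrate the guard are modelled.
interface RegionHandle {
  boolean isFlushInProgress();
  String describe();
  void flush() throws IOException;
}

class FlushCallableSketch {
  // Mirrors the behaviour seen in the log: if another flush is already
  // running, give up so the master retries the remote procedure later.
  void doCall(RegionHandle region) throws IOException {
    if (region.isFlushInProgress()) {
      throw new IOException("Unable to complete flush " + region.describe());
    }
    region.flush();
  }
}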
2024-11-26T10:33:21,110 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/8be353fc75bb4982ae31b8da8e4f2b17, entries=150, sequenceid=214, filesize=11.9 K 2024-11-26T10:33:21,111 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/bab66df00b63458494b13f6245dbc828 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/bab66df00b63458494b13f6245dbc828 2024-11-26T10:33:21,121 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/bab66df00b63458494b13f6245dbc828, entries=150, sequenceid=214, filesize=11.9 K 2024-11-26T10:33:21,123 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 6a931c8e80842c8947954ecd8357e9ad in 561ms, sequenceid=214, compaction requested=true 2024-11-26T10:33:21,123 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:21,123 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a931c8e80842c8947954ecd8357e9ad:A, priority=-2147483648, current under compaction store size is 1 2024-11-26T10:33:21,123 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:33:21,123 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:33:21,123 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:33:21,125 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:33:21,125 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39253 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:33:21,125 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): 6a931c8e80842c8947954ecd8357e9ad/A is initiating minor compaction (all files) 2024-11-26T10:33:21,125 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): 6a931c8e80842c8947954ecd8357e9ad/B is initiating minor compaction (all files) 2024-11-26T10:33:21,125 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a931c8e80842c8947954ecd8357e9ad:B, priority=-2147483648, current under compaction store size is 2 2024-11-26T10:33:21,125 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] 
regionserver.HRegion(2351): Starting compaction of 6a931c8e80842c8947954ecd8357e9ad/B in TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:21,125 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a931c8e80842c8947954ecd8357e9ad/A in TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:21,125 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/44413a28a80a43b589c7ef6822d5cc05, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/1feeae9316774cad9b1afddb2d844685, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/8be353fc75bb4982ae31b8da8e4f2b17] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp, totalSize=36.0 K 2024-11-26T10:33:21,125 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/7c7ffa2db9ad46be9618a74fe33f8864, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/a0ef14091e624b8c8192066cc9c8f2a9, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/c5c6a89542a6482c88a60625fe465903] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp, totalSize=38.3 K 2024-11-26T10:33:21,126 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 44413a28a80a43b589c7ef6822d5cc05, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732617198914 2024-11-26T10:33:21,126 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7c7ffa2db9ad46be9618a74fe33f8864, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732617198914 2024-11-26T10:33:21,126 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:33:21,126 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 1feeae9316774cad9b1afddb2d844685, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1732617199266 2024-11-26T10:33:21,127 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a931c8e80842c8947954ecd8357e9ad:C, priority=-2147483648, current under compaction store size is 3 2024-11-26T10:33:21,127 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), 
splitQueue=0 2024-11-26T10:33:21,127 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 8be353fc75bb4982ae31b8da8e4f2b17, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732617199941 2024-11-26T10:33:21,127 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting a0ef14091e624b8c8192066cc9c8f2a9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1732617199266 2024-11-26T10:33:21,129 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting c5c6a89542a6482c88a60625fe465903, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732617199938 2024-11-26T10:33:21,145 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a931c8e80842c8947954ecd8357e9ad#A#compaction#44 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:33:21,147 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/1d767e67856147f382e034042d14c2ce is 50, key is test_row_0/A:col10/1732617199941/Put/seqid=0 2024-11-26T10:33:21,148 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a931c8e80842c8947954ecd8357e9ad#B#compaction#45 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:33:21,148 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/9f54751241c84af8b9c55cabe1217ccc is 50, key is test_row_0/B:col10/1732617199941/Put/seqid=0 2024-11-26T10:33:21,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741883_1059 (size=12663) 2024-11-26T10:33:21,178 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/1d767e67856147f382e034042d14c2ce as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/1d767e67856147f382e034042d14c2ce 2024-11-26T10:33:21,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741884_1060 (size=12663) 2024-11-26T10:33:21,192 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6a931c8e80842c8947954ecd8357e9ad/A of 6a931c8e80842c8947954ecd8357e9ad into 1d767e67856147f382e034042d14c2ce(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
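Editor's note: the ExploringCompactionPolicy lines above ("selected 3 files of size 39253 ... with 1 in ratio") refer to a size-ratio test: a candidate set is only considered when no single file dominates the others. A simplified version of that check, written for illustration rather than taken from the HBase source; the byte sizes in the example are rough approximations of the three A-store files compacted above:

class CompactionRatioSketch {
  // Returns true when every file is no larger than ratio times the combined
  // size of the other files in the selection.
  static boolean filesInRatio(long[] fileSizes, double ratio) {
    if (fileSizes.length < 2) {
      return true;
    }
    long total = 0;
    for (long size : fileSizes) {
      total += size;
    }
    for (long size : fileSizes) {
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Roughly the A-store selection above: ~12.3 K, ~11.9 K and ~14.2 K files
    // easily pass with the default compaction ratio of 1.2, so all three are
    // rewritten into one file (1d767e67856147f382e034042d14c2ce).
    System.out.println(filesInRatio(new long[] {12600, 12200, 14500}, 1.2));
  }
}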
2024-11-26T10:33:21,192 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:21,192 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad., storeName=6a931c8e80842c8947954ecd8357e9ad/A, priority=13, startTime=1732617201123; duration=0sec 2024-11-26T10:33:21,192 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:33:21,192 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a931c8e80842c8947954ecd8357e9ad:A 2024-11-26T10:33:21,192 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:33:21,195 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/9f54751241c84af8b9c55cabe1217ccc as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/9f54751241c84af8b9c55cabe1217ccc 2024-11-26T10:33:21,195 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:33:21,195 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): 6a931c8e80842c8947954ecd8357e9ad/C is initiating minor compaction (all files) 2024-11-26T10:33:21,195 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a931c8e80842c8947954ecd8357e9ad/C in TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 
2024-11-26T10:33:21,195 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/8c5790d18b9a4f79913bca1faa0afde1, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/543aedca104e48679af3fd3ad13824cf, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/bab66df00b63458494b13f6245dbc828] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp, totalSize=36.0 K 2024-11-26T10:33:21,196 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8c5790d18b9a4f79913bca1faa0afde1, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732617198914 2024-11-26T10:33:21,197 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 543aedca104e48679af3fd3ad13824cf, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1732617199266 2024-11-26T10:33:21,197 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting bab66df00b63458494b13f6245dbc828, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732617199941 2024-11-26T10:33:21,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 6a931c8e80842c8947954ecd8357e9ad 2024-11-26T10:33:21,206 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6a931c8e80842c8947954ecd8357e9ad 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-26T10:33:21,206 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6a931c8e80842c8947954ecd8357e9ad/B of 6a931c8e80842c8947954ecd8357e9ad into 9f54751241c84af8b9c55cabe1217ccc(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-26T10:33:21,206 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:21,206 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad., storeName=6a931c8e80842c8947954ecd8357e9ad/B, priority=13, startTime=1732617201123; duration=0sec 2024-11-26T10:33:21,206 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=A 2024-11-26T10:33:21,206 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:33:21,206 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a931c8e80842c8947954ecd8357e9ad:B 2024-11-26T10:33:21,206 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:21,206 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=B 2024-11-26T10:33:21,206 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:21,206 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=C 2024-11-26T10:33:21,206 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:21,225 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/c4ada2985a7146f08d482ccc41b7e113 is 50, key is test_row_0/A:col10/1732617200587/Put/seqid=0 2024-11-26T10:33:21,225 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a931c8e80842c8947954ecd8357e9ad#C#compaction#47 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:33:21,225 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/253e67d32e0e400c947f5ebf79a9fc40 is 50, key is test_row_0/C:col10/1732617199941/Put/seqid=0 2024-11-26T10:33:21,235 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:21,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33248 deadline: 1732617261230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:21,235 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:21,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617261230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:21,236 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:21,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617261232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:21,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741885_1061 (size=12629) 2024-11-26T10:33:21,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-26T10:33:21,239 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:21,239 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:21,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617261235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:21,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617261233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:21,247 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/253e67d32e0e400c947f5ebf79a9fc40 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/253e67d32e0e400c947f5ebf79a9fc40 2024-11-26T10:33:21,252 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:21,252 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-26T10:33:21,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:21,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. as already flushing 2024-11-26T10:33:21,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:21,253 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:21,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
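Editor's note: from a client's point of view, the Mutate rejections scattered through this window surface as RegionTooBusyException (a subclass of IOException). The stock HBase client normally retries these internally; the sketch below only illustrates the same back-off pattern for a caller that sees the exception directly, under that assumption. Table, row and column names are taken from the test purely for illustration:

import java.io.IOException;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionBackoffExample {
  // Retry a single Put with exponential back-off while the region reports it
  // is over its memstore blocking limit.
  static void putWithBackoff(Table table, Put put) throws IOException, InterruptedException {
    long sleepMs = 100;
    for (int attempt = 0; attempt < 5; attempt++) {
      try {
        table.put(put);
        return;
      } catch (RegionTooBusyException e) {
        Thread.sleep(sleepMs);   // give the flush/compaction time to catch up
        sleepMs *= 2;
      }
    }
    throw new IOException("region still too busy after retries");
  }

  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
      putWithBackoff(table, put);
    }
  }
}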
2024-11-26T10:33:21,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:21,259 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6a931c8e80842c8947954ecd8357e9ad/C of 6a931c8e80842c8947954ecd8357e9ad into 253e67d32e0e400c947f5ebf79a9fc40(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-26T10:33:21,260 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:21,260 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad., storeName=6a931c8e80842c8947954ecd8357e9ad/C, priority=13, startTime=1732617201126; duration=0sec 2024-11-26T10:33:21,260 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:33:21,260 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a931c8e80842c8947954ecd8357e9ad:C 2024-11-26T10:33:21,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741886_1062 (size=14541) 2024-11-26T10:33:21,268 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/c4ada2985a7146f08d482ccc41b7e113 2024-11-26T10:33:21,285 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/01cec3fba0634940b16d6058d92a8277 is 50, key is test_row_0/B:col10/1732617200587/Put/seqid=0 2024-11-26T10:33:21,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741887_1063 (size=12151) 2024-11-26T10:33:21,299 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/01cec3fba0634940b16d6058d92a8277 2024-11-26T10:33:21,313 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/dac6b4717845476e817110decb445c14 is 50, key is test_row_0/C:col10/1732617200587/Put/seqid=0 2024-11-26T10:33:21,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741888_1064 (size=12151) 2024-11-26T10:33:21,333 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/dac6b4717845476e817110decb445c14 2024-11-26T10:33:21,339 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:21,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33248 deadline: 1732617261337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:21,340 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:21,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617261337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:21,342 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:21,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617261341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:21,343 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:21,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617261342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:21,343 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:21,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617261343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:21,351 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/c4ada2985a7146f08d482ccc41b7e113 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/c4ada2985a7146f08d482ccc41b7e113 2024-11-26T10:33:21,382 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/c4ada2985a7146f08d482ccc41b7e113, entries=200, sequenceid=235, filesize=14.2 K 2024-11-26T10:33:21,383 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/01cec3fba0634940b16d6058d92a8277 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/01cec3fba0634940b16d6058d92a8277 2024-11-26T10:33:21,390 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/01cec3fba0634940b16d6058d92a8277, entries=150, sequenceid=235, filesize=11.9 K 2024-11-26T10:33:21,391 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/dac6b4717845476e817110decb445c14 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/dac6b4717845476e817110decb445c14 2024-11-26T10:33:21,399 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/dac6b4717845476e817110decb445c14, entries=150, sequenceid=235, filesize=11.9 K 2024-11-26T10:33:21,400 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 
KB/96180 for 6a931c8e80842c8947954ecd8357e9ad in 195ms, sequenceid=235, compaction requested=false 2024-11-26T10:33:21,400 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:21,406 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:21,407 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-26T10:33:21,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:21,408 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing 6a931c8e80842c8947954ecd8357e9ad 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-26T10:33:21,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=A 2024-11-26T10:33:21,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:21,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=B 2024-11-26T10:33:21,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:21,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=C 2024-11-26T10:33:21,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:21,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/e5a4029ac9014bd0ae93a97e97dc9ad6 is 50, key is test_row_0/A:col10/1732617201231/Put/seqid=0 2024-11-26T10:33:21,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741889_1065 (size=12151) 2024-11-26T10:33:21,431 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/e5a4029ac9014bd0ae93a97e97dc9ad6 2024-11-26T10:33:21,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/e02ba23c08df4311bcd363ed228ae7e6 is 50, key is test_row_0/B:col10/1732617201231/Put/seqid=0 2024-11-26T10:33:21,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741890_1066 (size=12151) 2024-11-26T10:33:21,462 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/e02ba23c08df4311bcd363ed228ae7e6 2024-11-26T10:33:21,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/14194599004b4968a5cb09dbfc0b4131 is 50, key is test_row_0/C:col10/1732617201231/Put/seqid=0 2024-11-26T10:33:21,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741891_1067 (size=12151) 2024-11-26T10:33:21,501 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/14194599004b4968a5cb09dbfc0b4131 2024-11-26T10:33:21,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/e5a4029ac9014bd0ae93a97e97dc9ad6 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/e5a4029ac9014bd0ae93a97e97dc9ad6 2024-11-26T10:33:21,525 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/e5a4029ac9014bd0ae93a97e97dc9ad6, entries=150, sequenceid=253, filesize=11.9 K 2024-11-26T10:33:21,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/e02ba23c08df4311bcd363ed228ae7e6 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/e02ba23c08df4311bcd363ed228ae7e6 2024-11-26T10:33:21,539 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/e02ba23c08df4311bcd363ed228ae7e6, entries=150, sequenceid=253, filesize=11.9 K 2024-11-26T10:33:21,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/14194599004b4968a5cb09dbfc0b4131 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/14194599004b4968a5cb09dbfc0b4131 2024-11-26T10:33:21,548 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. as already flushing 2024-11-26T10:33:21,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 6a931c8e80842c8947954ecd8357e9ad 2024-11-26T10:33:21,551 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/14194599004b4968a5cb09dbfc0b4131, entries=150, sequenceid=253, filesize=11.9 K 2024-11-26T10:33:21,557 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=33.54 KB/34350 for 6a931c8e80842c8947954ecd8357e9ad in 148ms, sequenceid=253, compaction requested=true 2024-11-26T10:33:21,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:21,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 
2024-11-26T10:33:21,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-11-26T10:33:21,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-11-26T10:33:21,561 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-11-26T10:33:21,561 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 923 msec 2024-11-26T10:33:21,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 6a931c8e80842c8947954ecd8357e9ad 2024-11-26T10:33:21,562 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6a931c8e80842c8947954ecd8357e9ad 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-26T10:33:21,562 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=A 2024-11-26T10:33:21,562 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:21,562 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=B 2024-11-26T10:33:21,562 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:21,562 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=C 2024-11-26T10:33:21,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:21,564 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees in 931 msec 2024-11-26T10:33:21,570 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/ed35e0ca9998430caa077c85dc9169c8 is 50, key is test_row_1/A:col10/1732617201560/Put/seqid=0 2024-11-26T10:33:21,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741892_1068 (size=17177) 2024-11-26T10:33:21,598 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=264 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/ed35e0ca9998430caa077c85dc9169c8 2024-11-26T10:33:21,610 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/80294c9ff5d74b2da078050a35c10efb is 50, key is test_row_1/B:col10/1732617201560/Put/seqid=0 2024-11-26T10:33:21,615 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size 
limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:21,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617261605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:21,616 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:21,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33248 deadline: 1732617261605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:21,616 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:21,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617261613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:21,617 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:21,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617261613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:21,617 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:21,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617261613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:21,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741893_1069 (size=9857) 2024-11-26T10:33:21,625 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=264 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/80294c9ff5d74b2da078050a35c10efb 2024-11-26T10:33:21,636 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/526147ad62ac4068b33cad0b3992b418 is 50, key is test_row_1/C:col10/1732617201560/Put/seqid=0 2024-11-26T10:33:21,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741894_1070 (size=9857) 2024-11-26T10:33:21,650 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=264 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/526147ad62ac4068b33cad0b3992b418 2024-11-26T10:33:21,657 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/ed35e0ca9998430caa077c85dc9169c8 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/ed35e0ca9998430caa077c85dc9169c8 2024-11-26T10:33:21,665 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/ed35e0ca9998430caa077c85dc9169c8, entries=250, sequenceid=264, filesize=16.8 K 2024-11-26T10:33:21,667 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/80294c9ff5d74b2da078050a35c10efb as 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/80294c9ff5d74b2da078050a35c10efb 2024-11-26T10:33:21,675 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/80294c9ff5d74b2da078050a35c10efb, entries=100, sequenceid=264, filesize=9.6 K 2024-11-26T10:33:21,677 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/526147ad62ac4068b33cad0b3992b418 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/526147ad62ac4068b33cad0b3992b418 2024-11-26T10:33:21,688 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/526147ad62ac4068b33cad0b3992b418, entries=100, sequenceid=264, filesize=9.6 K 2024-11-26T10:33:21,690 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 6a931c8e80842c8947954ecd8357e9ad in 128ms, sequenceid=264, compaction requested=true 2024-11-26T10:33:21,690 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:21,690 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a931c8e80842c8947954ecd8357e9ad:A, priority=-2147483648, current under compaction store size is 1 2024-11-26T10:33:21,690 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-26T10:33:21,690 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:33:21,690 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-26T10:33:21,691 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a931c8e80842c8947954ecd8357e9ad:B, priority=-2147483648, current under compaction store size is 2 2024-11-26T10:33:21,691 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:33:21,691 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a931c8e80842c8947954ecd8357e9ad:C, priority=-2147483648, current under compaction store size is 3 2024-11-26T10:33:21,692 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 56532 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-26T10:33:21,692 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] 
compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 46822 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-26T10:33:21,693 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): 6a931c8e80842c8947954ecd8357e9ad/A is initiating minor compaction (all files) 2024-11-26T10:33:21,693 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): 6a931c8e80842c8947954ecd8357e9ad/B is initiating minor compaction (all files) 2024-11-26T10:33:21,693 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a931c8e80842c8947954ecd8357e9ad/A in TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:21,693 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a931c8e80842c8947954ecd8357e9ad/B in TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:21,693 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/1d767e67856147f382e034042d14c2ce, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/c4ada2985a7146f08d482ccc41b7e113, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/e5a4029ac9014bd0ae93a97e97dc9ad6, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/ed35e0ca9998430caa077c85dc9169c8] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp, totalSize=55.2 K 2024-11-26T10:33:21,693 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/9f54751241c84af8b9c55cabe1217ccc, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/01cec3fba0634940b16d6058d92a8277, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/e02ba23c08df4311bcd363ed228ae7e6, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/80294c9ff5d74b2da078050a35c10efb] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp, totalSize=45.7 K 2024-11-26T10:33:21,693 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:33:21,694 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 9f54751241c84af8b9c55cabe1217ccc, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, 
seqNum=214, earliestPutTs=1732617199941 2024-11-26T10:33:21,694 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1d767e67856147f382e034042d14c2ce, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732617199941 2024-11-26T10:33:21,694 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 01cec3fba0634940b16d6058d92a8277, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1732617200587 2024-11-26T10:33:21,694 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting c4ada2985a7146f08d482ccc41b7e113, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1732617200586 2024-11-26T10:33:21,695 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting e02ba23c08df4311bcd363ed228ae7e6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732617201218 2024-11-26T10:33:21,695 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting e5a4029ac9014bd0ae93a97e97dc9ad6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732617201218 2024-11-26T10:33:21,695 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 80294c9ff5d74b2da078050a35c10efb, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=264, earliestPutTs=1732617201559 2024-11-26T10:33:21,695 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting ed35e0ca9998430caa077c85dc9169c8, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=264, earliestPutTs=1732617201549 2024-11-26T10:33:21,714 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a931c8e80842c8947954ecd8357e9ad#A#compaction#56 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:33:21,715 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/8f92facc5d574cf9923499bb93f1d85c is 50, key is test_row_0/A:col10/1732617201231/Put/seqid=0 2024-11-26T10:33:21,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 6a931c8e80842c8947954ecd8357e9ad 2024-11-26T10:33:21,724 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6a931c8e80842c8947954ecd8357e9ad 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-26T10:33:21,725 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a931c8e80842c8947954ecd8357e9ad#B#compaction#57 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:33:21,726 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=A 2024-11-26T10:33:21,726 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:21,726 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=B 2024-11-26T10:33:21,726 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/947713f927f247e59b8f77913580cf52 is 50, key is test_row_0/B:col10/1732617201231/Put/seqid=0 2024-11-26T10:33:21,726 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:21,726 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=C 2024-11-26T10:33:21,726 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:21,735 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:21,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617261728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:21,735 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:21,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617261731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:21,736 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:21,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617261733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:21,738 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:21,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33248 deadline: 1732617261734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:21,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-26T10:33:21,739 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:21,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617261735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:21,739 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 20 completed 2024-11-26T10:33:21,741 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-26T10:33:21,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees 2024-11-26T10:33:21,744 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-26T10:33:21,745 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-26T10:33:21,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-26T10:33:21,745 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-26T10:33:21,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741895_1071 (size=12899) 2024-11-26T10:33:21,753 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/50a6f2a001084d0490759da45e3d01b4 is 50, key is test_row_0/A:col10/1732617201605/Put/seqid=0 2024-11-26T10:33:21,757 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/8f92facc5d574cf9923499bb93f1d85c as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/8f92facc5d574cf9923499bb93f1d85c 2024-11-26T10:33:21,768 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6a931c8e80842c8947954ecd8357e9ad/A of 6a931c8e80842c8947954ecd8357e9ad into 8f92facc5d574cf9923499bb93f1d85c(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:33:21,768 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:21,769 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad., storeName=6a931c8e80842c8947954ecd8357e9ad/A, priority=12, startTime=1732617201690; duration=0sec 2024-11-26T10:33:21,769 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:33:21,769 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a931c8e80842c8947954ecd8357e9ad:A 2024-11-26T10:33:21,769 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-26T10:33:21,771 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 46788 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-26T10:33:21,771 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): 6a931c8e80842c8947954ecd8357e9ad/C is initiating minor compaction (all files) 2024-11-26T10:33:21,772 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a931c8e80842c8947954ecd8357e9ad/C in TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 
2024-11-26T10:33:21,772 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/253e67d32e0e400c947f5ebf79a9fc40, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/dac6b4717845476e817110decb445c14, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/14194599004b4968a5cb09dbfc0b4131, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/526147ad62ac4068b33cad0b3992b418] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp, totalSize=45.7 K 2024-11-26T10:33:21,772 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 253e67d32e0e400c947f5ebf79a9fc40, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732617199941 2024-11-26T10:33:21,773 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting dac6b4717845476e817110decb445c14, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1732617200587 2024-11-26T10:33:21,774 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 14194599004b4968a5cb09dbfc0b4131, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732617201218 2024-11-26T10:33:21,774 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 526147ad62ac4068b33cad0b3992b418, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=264, earliestPutTs=1732617201559 2024-11-26T10:33:21,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741896_1072 (size=12899) 2024-11-26T10:33:21,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741897_1073 (size=12301) 2024-11-26T10:33:21,796 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a931c8e80842c8947954ecd8357e9ad#C#compaction#59 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:33:21,797 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/de0c65b0449c4c43bea280c0a1bff8b0 is 50, key is test_row_0/C:col10/1732617201231/Put/seqid=0 2024-11-26T10:33:21,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741898_1074 (size=12865) 2024-11-26T10:33:21,838 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:21,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617261837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:21,839 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:21,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617261838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:21,840 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:21,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617261838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:21,843 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:21,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617261842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:21,844 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:21,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33248 deadline: 1732617261842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:21,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-26T10:33:21,897 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:21,898 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-26T10:33:21,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:21,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. as already flushing 2024-11-26T10:33:21,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:21,898 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:33:21,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:21,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:22,043 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:22,043 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:22,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617262041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:22,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617262041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:22,044 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:22,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617262042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:22,046 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:22,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617262045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:22,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-26T10:33:22,048 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:22,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33248 deadline: 1732617262047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:22,051 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:22,052 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-26T10:33:22,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:22,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. as already flushing 2024-11-26T10:33:22,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:22,052 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:33:22,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:22,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:22,194 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/947713f927f247e59b8f77913580cf52 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/947713f927f247e59b8f77913580cf52 2024-11-26T10:33:22,194 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=292 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/50a6f2a001084d0490759da45e3d01b4 2024-11-26T10:33:22,205 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:22,206 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-26T10:33:22,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:22,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. as already flushing 2024-11-26T10:33:22,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:22,206 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:33:22,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:22,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:22,209 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6a931c8e80842c8947954ecd8357e9ad/B of 6a931c8e80842c8947954ecd8357e9ad into 947713f927f247e59b8f77913580cf52(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:33:22,209 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:22,213 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/37d7bbcb7f894d5f9400a2435ffab0cd is 50, key is test_row_0/B:col10/1732617201605/Put/seqid=0 2024-11-26T10:33:22,214 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad., storeName=6a931c8e80842c8947954ecd8357e9ad/B, priority=12, startTime=1732617201690; duration=0sec 2024-11-26T10:33:22,215 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:33:22,215 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a931c8e80842c8947954ecd8357e9ad:B 2024-11-26T10:33:22,219 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/de0c65b0449c4c43bea280c0a1bff8b0 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/de0c65b0449c4c43bea280c0a1bff8b0 2024-11-26T10:33:22,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741899_1075 (size=12301) 2024-11-26T10:33:22,235 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6a931c8e80842c8947954ecd8357e9ad/C of 6a931c8e80842c8947954ecd8357e9ad into de0c65b0449c4c43bea280c0a1bff8b0(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-26T10:33:22,236 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:22,236 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad., storeName=6a931c8e80842c8947954ecd8357e9ad/C, priority=12, startTime=1732617201691; duration=0sec 2024-11-26T10:33:22,236 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:33:22,236 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a931c8e80842c8947954ecd8357e9ad:C 2024-11-26T10:33:22,346 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:22,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617262345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:22,346 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:22,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617262345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:22,347 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:22,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617262347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:22,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-26T10:33:22,350 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:22,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617262348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:22,351 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:22,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33248 deadline: 1732617262350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:22,360 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:22,361 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-26T10:33:22,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:22,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. as already flushing 2024-11-26T10:33:22,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:22,361 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:33:22,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:22,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:22,514 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:22,514 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-26T10:33:22,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:22,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. as already flushing 2024-11-26T10:33:22,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:22,515 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:22,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:22,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:33:22,633 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=292 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/37d7bbcb7f894d5f9400a2435ffab0cd 2024-11-26T10:33:22,644 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/04afbd1628684434a808dbd6712ce879 is 50, key is test_row_0/C:col10/1732617201605/Put/seqid=0 2024-11-26T10:33:22,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741900_1076 (size=12301) 2024-11-26T10:33:22,668 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:22,668 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-26T10:33:22,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:22,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. as already flushing 2024-11-26T10:33:22,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:22,669 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:33:22,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:22,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:22,821 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:22,821 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-26T10:33:22,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:22,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. as already flushing 2024-11-26T10:33:22,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:22,822 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:22,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:22,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:22,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-26T10:33:22,849 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:22,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617262848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:22,852 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:22,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617262851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:22,853 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:22,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617262852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:22,856 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:22,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617262854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:22,859 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:22,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33248 deadline: 1732617262858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:22,974 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:22,975 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-26T10:33:22,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 
2024-11-26T10:33:22,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. as already flushing 2024-11-26T10:33:22,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:22,975 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:22,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:33:22,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:33:23,056 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=292 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/04afbd1628684434a808dbd6712ce879 2024-11-26T10:33:23,064 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/50a6f2a001084d0490759da45e3d01b4 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/50a6f2a001084d0490759da45e3d01b4 2024-11-26T10:33:23,072 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/50a6f2a001084d0490759da45e3d01b4, entries=150, sequenceid=292, filesize=12.0 K 2024-11-26T10:33:23,074 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/37d7bbcb7f894d5f9400a2435ffab0cd as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/37d7bbcb7f894d5f9400a2435ffab0cd 2024-11-26T10:33:23,081 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/37d7bbcb7f894d5f9400a2435ffab0cd, entries=150, sequenceid=292, filesize=12.0 K 2024-11-26T10:33:23,083 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/04afbd1628684434a808dbd6712ce879 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/04afbd1628684434a808dbd6712ce879 2024-11-26T10:33:23,090 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/04afbd1628684434a808dbd6712ce879, entries=150, sequenceid=292, filesize=12.0 K 2024-11-26T10:33:23,092 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for 6a931c8e80842c8947954ecd8357e9ad in 1367ms, sequenceid=292, compaction requested=false 2024-11-26T10:33:23,092 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:23,129 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:23,129 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=23 2024-11-26T10:33:23,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:23,130 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2837): Flushing 6a931c8e80842c8947954ecd8357e9ad 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-26T10:33:23,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=A 2024-11-26T10:33:23,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:23,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=B 2024-11-26T10:33:23,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:23,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=C 2024-11-26T10:33:23,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:23,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/74510dfefcfa406daa9f828c18213890 is 50, key is test_row_1/A:col10/1732617201733/Put/seqid=0 2024-11-26T10:33:23,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741901_1077 (size=9857) 2024-11-26T10:33:23,159 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=303 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/74510dfefcfa406daa9f828c18213890 2024-11-26T10:33:23,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/348ceb4cf81349dea56c6a5536c32cf3 is 50, key is test_row_1/B:col10/1732617201733/Put/seqid=0 2024-11-26T10:33:23,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741902_1078 (size=9857) 2024-11-26T10:33:23,186 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=303 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/348ceb4cf81349dea56c6a5536c32cf3 2024-11-26T10:33:23,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/393b54b400a748d18a5ed6a8dd7703a6 is 50, key is test_row_1/C:col10/1732617201733/Put/seqid=0 2024-11-26T10:33:23,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741903_1079 (size=9857) 2024-11-26T10:33:23,223 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=303 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/393b54b400a748d18a5ed6a8dd7703a6 2024-11-26T10:33:23,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/74510dfefcfa406daa9f828c18213890 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/74510dfefcfa406daa9f828c18213890 2024-11-26T10:33:23,239 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/74510dfefcfa406daa9f828c18213890, entries=100, sequenceid=303, filesize=9.6 K 2024-11-26T10:33:23,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/348ceb4cf81349dea56c6a5536c32cf3 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/348ceb4cf81349dea56c6a5536c32cf3 2024-11-26T10:33:23,249 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/348ceb4cf81349dea56c6a5536c32cf3, entries=100, sequenceid=303, filesize=9.6 K 2024-11-26T10:33:23,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/393b54b400a748d18a5ed6a8dd7703a6 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/393b54b400a748d18a5ed6a8dd7703a6 2024-11-26T10:33:23,259 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/393b54b400a748d18a5ed6a8dd7703a6, entries=100, sequenceid=303, filesize=9.6 K 2024-11-26T10:33:23,260 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 6a931c8e80842c8947954ecd8357e9ad in 130ms, sequenceid=303, compaction requested=true 2024-11-26T10:33:23,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2538): Flush status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:23,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:23,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=23 2024-11-26T10:33:23,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4106): Remote procedure done, pid=23 2024-11-26T10:33:23,264 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=23, resume processing ppid=22 2024-11-26T10:33:23,264 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, ppid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5170 sec 2024-11-26T10:33:23,267 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees in 1.5240 sec 2024-11-26T10:33:23,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-26T10:33:23,850 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 22 completed 2024-11-26T10:33:23,851 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-26T10:33:23,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees 2024-11-26T10:33:23,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-26T10:33:23,854 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-26T10:33:23,855 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-26T10:33:23,855 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-26T10:33:23,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 6a931c8e80842c8947954ecd8357e9ad 2024-11-26T10:33:23,871 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6a931c8e80842c8947954ecd8357e9ad 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-26T10:33:23,872 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=A 2024-11-26T10:33:23,872 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:23,872 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=B 2024-11-26T10:33:23,872 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:23,872 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=C 2024-11-26T10:33:23,872 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:23,878 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/997ce537c8434277934b02e82512a699 is 50, key is test_row_0/A:col10/1732617203870/Put/seqid=0 2024-11-26T10:33:23,900 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:23,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741904_1080 (size=12301) 2024-11-26T10:33:23,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617263896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:23,902 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=317 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/997ce537c8434277934b02e82512a699 2024-11-26T10:33:23,903 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:23,903 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:23,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617263899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:23,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33248 deadline: 1732617263898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:23,904 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:23,904 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:23,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617263900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:23,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617263901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:23,914 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/3bb122fc1a8c4941983ee2a22b11d164 is 50, key is test_row_0/B:col10/1732617203870/Put/seqid=0 2024-11-26T10:33:23,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741905_1081 (size=12301) 2024-11-26T10:33:23,939 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=317 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/3bb122fc1a8c4941983ee2a22b11d164 2024-11-26T10:33:23,954 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/8cda4f8edc804cf796faff52b716b987 is 50, key is test_row_0/C:col10/1732617203870/Put/seqid=0 2024-11-26T10:33:23,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 
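The repeated RegionTooBusyException entries above all cite the same blocking threshold, "Over memstore limit=512.0 K". Below is a minimal sketch of where a limit like that typically comes from, assuming the test lowers hbase.hregion.memstore.flush.size to 128 KB and keeps the default block multiplier of 4; neither value appears in the log itself, so both are assumptions.

```java
// Hypothetical illustration of how the 512.0 K blocking limit reported in the
// RegionTooBusyException messages above could arise. The configuration key names
// are the standard HBase ones; the concrete values (128 KB flush size, multiplier 4)
// are assumptions about this test setup, not read from the log.
public class MemstoreBlockingLimit {
    public static void main(String[] args) {
        long flushSize = 128 * 1024;  // assumed hbase.hregion.memstore.flush.size for the test
        int blockMultiplier = 4;      // assumed hbase.hregion.memstore.block.multiplier (HBase default)

        // HRegion.checkResources (seen in the stack traces above) rejects new mutations
        // once the region's memstore grows past flushSize * blockMultiplier.
        long blockingLimit = flushSize * blockMultiplier;

        System.out.printf("blocking memstore limit = %.1f K%n", blockingLimit / 1024.0); // 512.0 K
    }
}
```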
2024-11-26T10:33:23,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741906_1082 (size=12301) 2024-11-26T10:33:23,964 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=317 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/8cda4f8edc804cf796faff52b716b987 2024-11-26T10:33:23,972 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/997ce537c8434277934b02e82512a699 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/997ce537c8434277934b02e82512a699 2024-11-26T10:33:23,981 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/997ce537c8434277934b02e82512a699, entries=150, sequenceid=317, filesize=12.0 K 2024-11-26T10:33:23,983 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/3bb122fc1a8c4941983ee2a22b11d164 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/3bb122fc1a8c4941983ee2a22b11d164 2024-11-26T10:33:23,994 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/3bb122fc1a8c4941983ee2a22b11d164, entries=150, sequenceid=317, filesize=12.0 K 2024-11-26T10:33:23,995 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/8cda4f8edc804cf796faff52b716b987 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/8cda4f8edc804cf796faff52b716b987 2024-11-26T10:33:24,001 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/8cda4f8edc804cf796faff52b716b987, entries=150, sequenceid=317, filesize=12.0 K 2024-11-26T10:33:24,003 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 6a931c8e80842c8947954ecd8357e9ad in 132ms, sequenceid=317, compaction requested=true 2024-11-26T10:33:24,004 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:24,004 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 
blocking 2024-11-26T10:33:24,004 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a931c8e80842c8947954ecd8357e9ad:A, priority=-2147483648, current under compaction store size is 1 2024-11-26T10:33:24,004 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:33:24,004 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-26T10:33:24,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a931c8e80842c8947954ecd8357e9ad:B, priority=-2147483648, current under compaction store size is 2 2024-11-26T10:33:24,006 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:33:24,006 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a931c8e80842c8947954ecd8357e9ad:C, priority=-2147483648, current under compaction store size is 3 2024-11-26T10:33:24,006 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:33:24,006 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47358 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-26T10:33:24,006 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): 6a931c8e80842c8947954ecd8357e9ad/A is initiating minor compaction (all files) 2024-11-26T10:33:24,007 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a931c8e80842c8947954ecd8357e9ad/A in TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 
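The compaction threads above pick their input files via messages like "Exploring compaction algorithm has selected 4 files ... with 3 in ratio". The sketch below is a simplified, assumed version of that ratio test, not the actual ExploringCompactionPolicy source: a file counts as in ratio when it is no larger than the sum of the other files in the candidate window times the compaction ratio (1.2 is the usual default, assumed here rather than read from the log).

```java
import java.util.List;

// Simplified sketch (not the HBase source) of the ratio check behind lines such as
// "selected 4 files of size 47358 ... with 3 in ratio" above.
public class RatioCheckSketch {
    static boolean windowInRatio(List<Long> sizes, double ratio) {
        long total = sizes.stream().mapToLong(Long::longValue).sum();
        for (long size : sizes) {
            // A file is out of ratio when it dwarfs the rest of the window.
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Approximate sizes of the four HFiles being compacted above (12.6 K, 12.0 K, 9.6 K, 12.0 K).
        List<Long> window = List.of(12902L, 12288L, 9830L, 12288L);
        System.out.println("window in ratio: " + windowInRatio(window, 1.2));
    }
}
```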
2024-11-26T10:33:24,007 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/8f92facc5d574cf9923499bb93f1d85c, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/50a6f2a001084d0490759da45e3d01b4, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/74510dfefcfa406daa9f828c18213890, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/997ce537c8434277934b02e82512a699] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp, totalSize=46.2 K 2024-11-26T10:33:24,007 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:24,008 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-26T10:33:24,008 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8f92facc5d574cf9923499bb93f1d85c, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=264, earliestPutTs=1732617201231 2024-11-26T10:33:24,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:24,008 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2837): Flushing 6a931c8e80842c8947954ecd8357e9ad 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-26T10:33:24,009 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47358 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-26T10:33:24,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 6a931c8e80842c8947954ecd8357e9ad 2024-11-26T10:33:24,009 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): 6a931c8e80842c8947954ecd8357e9ad/B is initiating minor compaction (all files) 2024-11-26T10:33:24,009 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a931c8e80842c8947954ecd8357e9ad/B in TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:24,009 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 
as already flushing 2024-11-26T10:33:24,010 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/947713f927f247e59b8f77913580cf52, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/37d7bbcb7f894d5f9400a2435ffab0cd, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/348ceb4cf81349dea56c6a5536c32cf3, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/3bb122fc1a8c4941983ee2a22b11d164] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp, totalSize=46.2 K 2024-11-26T10:33:24,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=A 2024-11-26T10:33:24,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:24,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=B 2024-11-26T10:33:24,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:24,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=C 2024-11-26T10:33:24,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:24,010 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 947713f927f247e59b8f77913580cf52, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=264, earliestPutTs=1732617201231 2024-11-26T10:33:24,010 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 50a6f2a001084d0490759da45e3d01b4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1732617201605 2024-11-26T10:33:24,012 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 37d7bbcb7f894d5f9400a2435ffab0cd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1732617201605 2024-11-26T10:33:24,012 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 348ceb4cf81349dea56c6a5536c32cf3, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=303, earliestPutTs=1732617201733 2024-11-26T10:33:24,014 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 
3bb122fc1a8c4941983ee2a22b11d164, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1732617203870 2024-11-26T10:33:24,014 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 74510dfefcfa406daa9f828c18213890, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=303, earliestPutTs=1732617201733 2024-11-26T10:33:24,016 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 997ce537c8434277934b02e82512a699, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1732617203870 2024-11-26T10:33:24,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/b9ca1a9c7df4417598ae70c15aa8b63f is 50, key is test_row_0/A:col10/1732617204007/Put/seqid=0 2024-11-26T10:33:24,036 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a931c8e80842c8947954ecd8357e9ad#B#compaction#69 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:33:24,037 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/1169269d26d84103b412eae7ff346eaa is 50, key is test_row_0/B:col10/1732617203870/Put/seqid=0 2024-11-26T10:33:24,041 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a931c8e80842c8947954ecd8357e9ad#A#compaction#70 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:33:24,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741907_1083 (size=12301) 2024-11-26T10:33:24,041 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/77df0c371bc14ca89ed97e622f6b1f7d is 50, key is test_row_0/A:col10/1732617203870/Put/seqid=0 2024-11-26T10:33:24,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741908_1084 (size=13085) 2024-11-26T10:33:24,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741909_1085 (size=13085) 2024-11-26T10:33:24,060 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/1169269d26d84103b412eae7ff346eaa as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/1169269d26d84103b412eae7ff346eaa 2024-11-26T10:33:24,061 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:24,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617264023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:24,067 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:24,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33248 deadline: 1732617264038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:24,070 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:24,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617264061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:24,070 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:24,071 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:24,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617264063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:24,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617264065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:24,079 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6a931c8e80842c8947954ecd8357e9ad/B of 6a931c8e80842c8947954ecd8357e9ad into 1169269d26d84103b412eae7ff346eaa(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:33:24,079 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:24,079 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad., storeName=6a931c8e80842c8947954ecd8357e9ad/B, priority=12, startTime=1732617204004; duration=0sec 2024-11-26T10:33:24,080 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:33:24,080 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a931c8e80842c8947954ecd8357e9ad:B 2024-11-26T10:33:24,080 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-26T10:33:24,082 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47324 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-26T10:33:24,082 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): 6a931c8e80842c8947954ecd8357e9ad/C is initiating minor compaction (all files) 2024-11-26T10:33:24,082 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a931c8e80842c8947954ecd8357e9ad/C in TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 
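While the flush and compactions above run, writers keep receiving RegionTooBusyException until the memstore drains back below the blocking limit. The snippet below only illustrates how a caller could back off and retry around Table.put; in practice the stock HBase client already retries this exception internally, and the table, row, and column names are placeholders rather than values taken from this log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative only: an explicit retry loop like this is normally unnecessary because
// the HBase client retries RegionTooBusyException with its own backoff.
public class BusyRegionRetryExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"))
                .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            long backoffMs = 100;
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put); // may surface RegionTooBusyException while the region is blocked
                    break;
                } catch (RegionTooBusyException e) {
                    Thread.sleep(backoffMs); // wait for the flush/compaction to drain the memstore
                    backoffMs *= 2;
                }
            }
        }
    }
}
```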
2024-11-26T10:33:24,082 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/de0c65b0449c4c43bea280c0a1bff8b0, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/04afbd1628684434a808dbd6712ce879, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/393b54b400a748d18a5ed6a8dd7703a6, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/8cda4f8edc804cf796faff52b716b987] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp, totalSize=46.2 K 2024-11-26T10:33:24,083 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting de0c65b0449c4c43bea280c0a1bff8b0, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=264, earliestPutTs=1732617201231 2024-11-26T10:33:24,084 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 04afbd1628684434a808dbd6712ce879, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1732617201605 2024-11-26T10:33:24,084 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 393b54b400a748d18a5ed6a8dd7703a6, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=303, earliestPutTs=1732617201733 2024-11-26T10:33:24,085 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 8cda4f8edc804cf796faff52b716b987, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1732617203870 2024-11-26T10:33:24,112 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a931c8e80842c8947954ecd8357e9ad#C#compaction#71 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:33:24,113 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/1cdf0569dda24789b272489aae3b46a3 is 50, key is test_row_0/C:col10/1732617203870/Put/seqid=0 2024-11-26T10:33:24,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741910_1086 (size=13051) 2024-11-26T10:33:24,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-26T10:33:24,165 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:24,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617264162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:24,169 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:24,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33248 deadline: 1732617264169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:24,173 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:24,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617264172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:24,175 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:24,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617264172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:24,175 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:24,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617264173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:24,367 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:24,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617264366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:24,371 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:24,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33248 deadline: 1732617264370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:24,376 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:24,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617264375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:24,377 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:24,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617264377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:24,378 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:24,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617264378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:24,442 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=342 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/b9ca1a9c7df4417598ae70c15aa8b63f 2024-11-26T10:33:24,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/f6487cefa3564f0bb71edda2cdee708d is 50, key is test_row_0/B:col10/1732617204007/Put/seqid=0 2024-11-26T10:33:24,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-26T10:33:24,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741911_1087 (size=12301) 2024-11-26T10:33:24,468 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=342 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/f6487cefa3564f0bb71edda2cdee708d 2024-11-26T10:33:24,477 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/77df0c371bc14ca89ed97e622f6b1f7d as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/77df0c371bc14ca89ed97e622f6b1f7d 2024-11-26T10:33:24,494 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6a931c8e80842c8947954ecd8357e9ad/A of 6a931c8e80842c8947954ecd8357e9ad into 77df0c371bc14ca89ed97e622f6b1f7d(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
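Each flushed or compacted store file above is first written under the region's .tmp directory and then logged as "Committing ... as ..." when it is moved into the column family directory. The sketch below shows that write-then-rename pattern in isolation, using placeholder paths; the real path goes through HRegionFileSystem (as in the log lines above) and performs far more validation.

```java
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Minimal sketch of the write-to-.tmp-then-rename pattern visible in the
// "Committing .../.tmp/A/... as .../A/..." entries above. Paths and file contents
// are placeholders, not values from the log.
public class TmpThenCommitSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        Path tmpFile = new Path("/tmp/region/.tmp/A/example-hfile");
        Path committed = new Path("/tmp/region/A/example-hfile");

        // 1. Write the new store file under the region's .tmp directory.
        try (FSDataOutputStream out = fs.create(tmpFile, true)) {
            out.write("flushed cells".getBytes(StandardCharsets.UTF_8));
        }

        // 2. Move it into the column family directory (an atomic rename on HDFS),
        //    so readers only ever see fully written files.
        fs.mkdirs(committed.getParent());
        boolean ok = fs.rename(tmpFile, committed);
        System.out.println("committed: " + ok);
    }
}
```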
2024-11-26T10:33:24,494 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:24,494 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad., storeName=6a931c8e80842c8947954ecd8357e9ad/A, priority=12, startTime=1732617204004; duration=0sec 2024-11-26T10:33:24,494 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:33:24,494 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a931c8e80842c8947954ecd8357e9ad:A 2024-11-26T10:33:24,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/96b00a2aa9384bab83cb3056fe3ed8fe is 50, key is test_row_0/C:col10/1732617204007/Put/seqid=0 2024-11-26T10:33:24,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741912_1088 (size=12301) 2024-11-26T10:33:24,527 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/1cdf0569dda24789b272489aae3b46a3 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/1cdf0569dda24789b272489aae3b46a3 2024-11-26T10:33:24,537 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6a931c8e80842c8947954ecd8357e9ad/C of 6a931c8e80842c8947954ecd8357e9ad into 1cdf0569dda24789b272489aae3b46a3(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:33:24,537 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:24,537 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad., storeName=6a931c8e80842c8947954ecd8357e9ad/C, priority=12, startTime=1732617204006; duration=0sec 2024-11-26T10:33:24,537 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:33:24,538 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a931c8e80842c8947954ecd8357e9ad:C 2024-11-26T10:33:24,672 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:24,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617264671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:24,675 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:24,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33248 deadline: 1732617264674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:24,682 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:24,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617264680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:24,682 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:24,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617264680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:24,683 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:24,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617264680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:24,917 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=342 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/96b00a2aa9384bab83cb3056fe3ed8fe 2024-11-26T10:33:24,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/b9ca1a9c7df4417598ae70c15aa8b63f as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/b9ca1a9c7df4417598ae70c15aa8b63f 2024-11-26T10:33:24,930 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/b9ca1a9c7df4417598ae70c15aa8b63f, entries=150, sequenceid=342, filesize=12.0 K 2024-11-26T10:33:24,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/f6487cefa3564f0bb71edda2cdee708d as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/f6487cefa3564f0bb71edda2cdee708d 2024-11-26T10:33:24,939 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/f6487cefa3564f0bb71edda2cdee708d, entries=150, sequenceid=342, filesize=12.0 K 2024-11-26T10:33:24,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/96b00a2aa9384bab83cb3056fe3ed8fe as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/96b00a2aa9384bab83cb3056fe3ed8fe 2024-11-26T10:33:24,950 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/96b00a2aa9384bab83cb3056fe3ed8fe, entries=150, sequenceid=342, filesize=12.0 K 2024-11-26T10:33:24,951 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=67.09 KB/68700 for 6a931c8e80842c8947954ecd8357e9ad in 943ms, sequenceid=342, compaction requested=false 2024-11-26T10:33:24,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2538): Flush status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:24,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:24,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=25 2024-11-26T10:33:24,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4106): Remote procedure done, pid=25 2024-11-26T10:33:24,956 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=25, resume processing ppid=24 2024-11-26T10:33:24,956 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0990 sec 2024-11-26T10:33:24,957 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees in 1.1050 sec 2024-11-26T10:33:24,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-26T10:33:24,968 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 24 completed 2024-11-26T10:33:24,970 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-26T10:33:24,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees 2024-11-26T10:33:24,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-26T10:33:24,974 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE, 
locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-26T10:33:24,975 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-26T10:33:24,975 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=26, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-26T10:33:25,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-26T10:33:25,129 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:25,130 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-26T10:33:25,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:25,130 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2837): Flushing 6a931c8e80842c8947954ecd8357e9ad 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-26T10:33:25,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=A 2024-11-26T10:33:25,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:25,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=B 2024-11-26T10:33:25,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:25,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=C 2024-11-26T10:33:25,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:25,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/f5756f155680495b90888db72c264a32 is 50, key is test_row_0/A:col10/1732617204023/Put/seqid=0 2024-11-26T10:33:25,176 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 
as already flushing 2024-11-26T10:33:25,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 6a931c8e80842c8947954ecd8357e9ad 2024-11-26T10:33:25,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741913_1089 (size=12301) 2024-11-26T10:33:25,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-26T10:33:25,300 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:25,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617265296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:25,301 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:25,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617265296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:25,302 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:25,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617265298, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:25,302 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:25,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617265298, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:25,304 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:25,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33248 deadline: 1732617265301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:25,404 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:25,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617265402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:25,405 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:25,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617265402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:25,406 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:25,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617265403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:25,406 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:25,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617265403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:25,407 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:25,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33248 deadline: 1732617265406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:25,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-26T10:33:25,581 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=358 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/f5756f155680495b90888db72c264a32 2024-11-26T10:33:25,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/58f9b86484644fcbac52a152bf2856c9 is 50, key is test_row_0/B:col10/1732617204023/Put/seqid=0 2024-11-26T10:33:25,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741914_1090 (size=12301) 2024-11-26T10:33:25,607 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:25,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617265607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:25,608 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:25,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617265608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:25,609 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:25,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617265608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:25,610 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:25,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617265608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:25,610 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:25,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33248 deadline: 1732617265609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:25,912 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:25,912 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:25,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617265911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:25,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617265911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:25,913 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:25,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617265911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:25,913 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:25,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33248 deadline: 1732617265912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:25,915 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:25,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617265913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:25,995 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=358 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/58f9b86484644fcbac52a152bf2856c9 2024-11-26T10:33:26,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/b6924dfb8ade4a229f09e981801ff2c4 is 50, key is test_row_0/C:col10/1732617204023/Put/seqid=0 2024-11-26T10:33:26,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741915_1091 (size=12301) 2024-11-26T10:33:26,012 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=358 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/b6924dfb8ade4a229f09e981801ff2c4 2024-11-26T10:33:26,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/f5756f155680495b90888db72c264a32 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/f5756f155680495b90888db72c264a32 2024-11-26T10:33:26,024 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/f5756f155680495b90888db72c264a32, entries=150, sequenceid=358, filesize=12.0 K 2024-11-26T10:33:26,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/58f9b86484644fcbac52a152bf2856c9 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/58f9b86484644fcbac52a152bf2856c9 2024-11-26T10:33:26,032 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/58f9b86484644fcbac52a152bf2856c9, entries=150, sequenceid=358, filesize=12.0 K 2024-11-26T10:33:26,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/b6924dfb8ade4a229f09e981801ff2c4 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/b6924dfb8ade4a229f09e981801ff2c4 2024-11-26T10:33:26,042 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/b6924dfb8ade4a229f09e981801ff2c4, entries=150, sequenceid=358, filesize=12.0 K 2024-11-26T10:33:26,045 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 6a931c8e80842c8947954ecd8357e9ad in 915ms, sequenceid=358, compaction requested=true 2024-11-26T10:33:26,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2538): Flush status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:26,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 
2024-11-26T10:33:26,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=27 2024-11-26T10:33:26,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4106): Remote procedure done, pid=27 2024-11-26T10:33:26,048 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=26 2024-11-26T10:33:26,049 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0710 sec 2024-11-26T10:33:26,052 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees in 1.0810 sec 2024-11-26T10:33:26,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-26T10:33:26,077 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 26 completed 2024-11-26T10:33:26,079 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-26T10:33:26,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees 2024-11-26T10:33:26,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-26T10:33:26,082 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-26T10:33:26,085 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-26T10:33:26,085 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-26T10:33:26,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-26T10:33:26,238 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:26,239 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-26T10:33:26,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 
2024-11-26T10:33:26,239 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2837): Flushing 6a931c8e80842c8947954ecd8357e9ad 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-26T10:33:26,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=A 2024-11-26T10:33:26,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:26,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=B 2024-11-26T10:33:26,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:26,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=C 2024-11-26T10:33:26,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:26,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/efc5583d048540a5b0f27ca42ece6c67 is 50, key is test_row_0/A:col10/1732617205299/Put/seqid=0 2024-11-26T10:33:26,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741916_1092 (size=12301) 2024-11-26T10:33:26,252 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=381 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/efc5583d048540a5b0f27ca42ece6c67 2024-11-26T10:33:26,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/17d44034876e4bf6a17e7583b7a95af5 is 50, key is test_row_0/B:col10/1732617205299/Put/seqid=0 2024-11-26T10:33:26,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741917_1093 (size=12301) 2024-11-26T10:33:26,299 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=381 (bloomFilter=true), 
to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/17d44034876e4bf6a17e7583b7a95af5 2024-11-26T10:33:26,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/e0b57dee62694719a9da03a7fc4059d4 is 50, key is test_row_0/C:col10/1732617205299/Put/seqid=0 2024-11-26T10:33:26,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741918_1094 (size=12301) 2024-11-26T10:33:26,337 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=381 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/e0b57dee62694719a9da03a7fc4059d4 2024-11-26T10:33:26,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/efc5583d048540a5b0f27ca42ece6c67 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/efc5583d048540a5b0f27ca42ece6c67 2024-11-26T10:33:26,355 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/efc5583d048540a5b0f27ca42ece6c67, entries=150, sequenceid=381, filesize=12.0 K 2024-11-26T10:33:26,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/17d44034876e4bf6a17e7583b7a95af5 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/17d44034876e4bf6a17e7583b7a95af5 2024-11-26T10:33:26,363 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/17d44034876e4bf6a17e7583b7a95af5, entries=150, sequenceid=381, filesize=12.0 K 2024-11-26T10:33:26,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/e0b57dee62694719a9da03a7fc4059d4 as 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/e0b57dee62694719a9da03a7fc4059d4 2024-11-26T10:33:26,372 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/e0b57dee62694719a9da03a7fc4059d4, entries=150, sequenceid=381, filesize=12.0 K 2024-11-26T10:33:26,374 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=0 B/0 for 6a931c8e80842c8947954ecd8357e9ad in 134ms, sequenceid=381, compaction requested=true 2024-11-26T10:33:26,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2538): Flush status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:26,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:26,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-11-26T10:33:26,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4106): Remote procedure done, pid=29 2024-11-26T10:33:26,378 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=29, resume processing ppid=28 2024-11-26T10:33:26,378 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, ppid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 290 msec 2024-11-26T10:33:26,381 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees in 300 msec 2024-11-26T10:33:26,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-26T10:33:26,385 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 28 completed 2024-11-26T10:33:26,386 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-26T10:33:26,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees 2024-11-26T10:33:26,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-26T10:33:26,388 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-26T10:33:26,389 INFO [PEWorker-4 {}] 
procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-26T10:33:26,389 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-26T10:33:26,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 6a931c8e80842c8947954ecd8357e9ad 2024-11-26T10:33:26,427 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6a931c8e80842c8947954ecd8357e9ad 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-26T10:33:26,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=A 2024-11-26T10:33:26,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:26,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=B 2024-11-26T10:33:26,428 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:26,428 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=C 2024-11-26T10:33:26,428 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:26,434 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/3373fd420b3a46a99ed212d476ea7a74 is 50, key is test_row_0/A:col10/1732617206420/Put/seqid=0 2024-11-26T10:33:26,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741919_1095 (size=12301) 2024-11-26T10:33:26,457 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=392 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/3373fd420b3a46a99ed212d476ea7a74 2024-11-26T10:33:26,459 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:26,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617266455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:26,460 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:26,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617266456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:26,464 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:26,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617266457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:26,464 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:26,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617266460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:26,465 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:26,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33248 deadline: 1732617266460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:26,466 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/fb2da11529df4b81b0d6daaa70795398 is 50, key is test_row_0/B:col10/1732617206420/Put/seqid=0 2024-11-26T10:33:26,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741920_1096 (size=12301) 2024-11-26T10:33:26,483 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=392 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/fb2da11529df4b81b0d6daaa70795398 2024-11-26T10:33:26,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-26T10:33:26,494 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/1b76ceca4cf74046a6487002474c20fd is 50, key is test_row_0/C:col10/1732617206420/Put/seqid=0 2024-11-26T10:33:26,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741921_1097 (size=12301) 2024-11-26T10:33:26,523 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=392 (bloomFilter=true), 
to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/1b76ceca4cf74046a6487002474c20fd 2024-11-26T10:33:26,532 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/3373fd420b3a46a99ed212d476ea7a74 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/3373fd420b3a46a99ed212d476ea7a74 2024-11-26T10:33:26,540 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:26,541 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-26T10:33:26,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:26,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. as already flushing 2024-11-26T10:33:26,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:26,541 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:26,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:26,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:33:26,547 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/3373fd420b3a46a99ed212d476ea7a74, entries=150, sequenceid=392, filesize=12.0 K 2024-11-26T10:33:26,549 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/fb2da11529df4b81b0d6daaa70795398 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/fb2da11529df4b81b0d6daaa70795398 2024-11-26T10:33:26,557 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/fb2da11529df4b81b0d6daaa70795398, entries=150, sequenceid=392, filesize=12.0 K 2024-11-26T10:33:26,559 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/1b76ceca4cf74046a6487002474c20fd as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/1b76ceca4cf74046a6487002474c20fd 2024-11-26T10:33:26,566 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:26,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617266561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:26,566 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/1b76ceca4cf74046a6487002474c20fd, entries=150, sequenceid=392, filesize=12.0 K 2024-11-26T10:33:26,566 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:26,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617266562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:26,568 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 6a931c8e80842c8947954ecd8357e9ad in 141ms, sequenceid=392, compaction requested=true 2024-11-26T10:33:26,568 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:26,569 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a931c8e80842c8947954ecd8357e9ad:A, priority=-2147483648, current under compaction store size is 1 2024-11-26T10:33:26,569 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-26T10:33:26,569 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:33:26,569 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-26T10:33:26,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 6a931c8e80842c8947954ecd8357e9ad 2024-11-26T10:33:26,570 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a931c8e80842c8947954ecd8357e9ad:B, priority=-2147483648, current under compaction store size is 2 2024-11-26T10:33:26,571 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:33:26,572 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a931c8e80842c8947954ecd8357e9ad:C, priority=-2147483648, current under compaction store size is 3 2024-11-26T10:33:26,572 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 62289 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-26T10:33:26,572 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): 6a931c8e80842c8947954ecd8357e9ad/A is initiating minor compaction (all files) 2024-11-26T10:33:26,572 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] 
regionserver.HRegion(2351): Starting compaction of 6a931c8e80842c8947954ecd8357e9ad/A in TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:26,572 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/77df0c371bc14ca89ed97e622f6b1f7d, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/b9ca1a9c7df4417598ae70c15aa8b63f, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/f5756f155680495b90888db72c264a32, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/efc5583d048540a5b0f27ca42ece6c67, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/3373fd420b3a46a99ed212d476ea7a74] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp, totalSize=60.8 K 2024-11-26T10:33:26,573 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 62289 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-26T10:33:26,573 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): 6a931c8e80842c8947954ecd8357e9ad/B is initiating minor compaction (all files) 2024-11-26T10:33:26,573 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a931c8e80842c8947954ecd8357e9ad/B in TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 
2024-11-26T10:33:26,573 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/1169269d26d84103b412eae7ff346eaa, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/f6487cefa3564f0bb71edda2cdee708d, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/58f9b86484644fcbac52a152bf2856c9, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/17d44034876e4bf6a17e7583b7a95af5, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/fb2da11529df4b81b0d6daaa70795398] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp, totalSize=60.8 K 2024-11-26T10:33:26,573 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 77df0c371bc14ca89ed97e622f6b1f7d, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1732617203870 2024-11-26T10:33:26,574 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 1169269d26d84103b412eae7ff346eaa, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1732617203870 2024-11-26T10:33:26,574 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting b9ca1a9c7df4417598ae70c15aa8b63f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=342, earliestPutTs=1732617203898 2024-11-26T10:33:26,574 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:33:26,574 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting f6487cefa3564f0bb71edda2cdee708d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=342, earliestPutTs=1732617203898 2024-11-26T10:33:26,575 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting f5756f155680495b90888db72c264a32, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=358, earliestPutTs=1732617204023 2024-11-26T10:33:26,575 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 58f9b86484644fcbac52a152bf2856c9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=358, earliestPutTs=1732617204023 2024-11-26T10:33:26,576 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 17d44034876e4bf6a17e7583b7a95af5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=381, earliestPutTs=1732617205199 2024-11-26T10:33:26,576 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting efc5583d048540a5b0f27ca42ece6c67, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=381, 
earliestPutTs=1732617205199 2024-11-26T10:33:26,576 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6a931c8e80842c8947954ecd8357e9ad 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-11-26T10:33:26,576 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting fb2da11529df4b81b0d6daaa70795398, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=392, earliestPutTs=1732617206420 2024-11-26T10:33:26,576 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3373fd420b3a46a99ed212d476ea7a74, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=392, earliestPutTs=1732617206420 2024-11-26T10:33:26,580 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=A 2024-11-26T10:33:26,580 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:26,581 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=B 2024-11-26T10:33:26,581 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:26,581 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=C 2024-11-26T10:33:26,582 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:26,592 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:26,592 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:26,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33248 deadline: 1732617266586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:26,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617266584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:26,593 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a931c8e80842c8947954ecd8357e9ad#B#compaction#83 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:33:26,594 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/234c4ad051cf4774a450c70fd891f1db is 50, key is test_row_0/B:col10/1732617206420/Put/seqid=0 2024-11-26T10:33:26,597 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:26,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617266592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:26,600 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/858ffa5dd2af4655b52e75d165b10e75 is 50, key is test_row_0/A:col10/1732617206572/Put/seqid=0 2024-11-26T10:33:26,603 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a931c8e80842c8947954ecd8357e9ad#A#compaction#85 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:33:26,603 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/895142e2f0d3499c9f5e77456e999b15 is 50, key is test_row_0/A:col10/1732617206420/Put/seqid=0 2024-11-26T10:33:26,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741922_1098 (size=13255) 2024-11-26T10:33:26,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741923_1099 (size=12301) 2024-11-26T10:33:26,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741924_1100 (size=13255) 2024-11-26T10:33:26,639 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/895142e2f0d3499c9f5e77456e999b15 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/895142e2f0d3499c9f5e77456e999b15 2024-11-26T10:33:26,647 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 6a931c8e80842c8947954ecd8357e9ad/A of 6a931c8e80842c8947954ecd8357e9ad into 895142e2f0d3499c9f5e77456e999b15(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-26T10:33:26,647 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:26,647 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad., storeName=6a931c8e80842c8947954ecd8357e9ad/A, priority=11, startTime=1732617206568; duration=0sec 2024-11-26T10:33:26,647 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:33:26,647 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a931c8e80842c8947954ecd8357e9ad:A 2024-11-26T10:33:26,647 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-26T10:33:26,649 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 62255 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-26T10:33:26,649 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): 6a931c8e80842c8947954ecd8357e9ad/C is initiating minor compaction (all files) 2024-11-26T10:33:26,649 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a931c8e80842c8947954ecd8357e9ad/C in TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:26,649 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/1cdf0569dda24789b272489aae3b46a3, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/96b00a2aa9384bab83cb3056fe3ed8fe, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/b6924dfb8ade4a229f09e981801ff2c4, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/e0b57dee62694719a9da03a7fc4059d4, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/1b76ceca4cf74046a6487002474c20fd] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp, totalSize=60.8 K 2024-11-26T10:33:26,649 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1cdf0569dda24789b272489aae3b46a3, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1732617203870 2024-11-26T10:33:26,650 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 96b00a2aa9384bab83cb3056fe3ed8fe, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=342, earliestPutTs=1732617203898 
2024-11-26T10:33:26,650 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting b6924dfb8ade4a229f09e981801ff2c4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=358, earliestPutTs=1732617204023 2024-11-26T10:33:26,650 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting e0b57dee62694719a9da03a7fc4059d4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=381, earliestPutTs=1732617205199 2024-11-26T10:33:26,651 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1b76ceca4cf74046a6487002474c20fd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=392, earliestPutTs=1732617206420 2024-11-26T10:33:26,663 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a931c8e80842c8947954ecd8357e9ad#C#compaction#86 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:33:26,664 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/d57d32fc88ea4fd7a1d262e978f2e3f6 is 50, key is test_row_0/C:col10/1732617206420/Put/seqid=0 2024-11-26T10:33:26,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741925_1101 (size=13221) 2024-11-26T10:33:26,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-26T10:33:26,694 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:26,695 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-26T10:33:26,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:26,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. as already flushing 2024-11-26T10:33:26,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:26,695 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:26,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:26,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:26,696 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:26,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617266694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:26,697 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:26,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33248 deadline: 1732617266695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:26,700 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:26,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617266699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:26,769 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:26,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617266769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:26,771 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:26,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617266769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:26,848 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:26,848 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-26T10:33:26,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:26,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. as already flushing 2024-11-26T10:33:26,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:26,849 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:26,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:26,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:26,899 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:26,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33248 deadline: 1732617266899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:26,903 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:26,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617266899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:26,906 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:26,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617266903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:26,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-26T10:33:27,000 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:27,001 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-26T10:33:27,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:27,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. as already flushing 2024-11-26T10:33:27,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:27,001 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:27,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:27,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:27,029 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=421 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/858ffa5dd2af4655b52e75d165b10e75 2024-11-26T10:33:27,032 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/234c4ad051cf4774a450c70fd891f1db as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/234c4ad051cf4774a450c70fd891f1db 2024-11-26T10:33:27,039 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/463de66ed9d64cd5a66470e442e80541 is 50, key is test_row_0/B:col10/1732617206572/Put/seqid=0 2024-11-26T10:33:27,039 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 6a931c8e80842c8947954ecd8357e9ad/B of 6a931c8e80842c8947954ecd8357e9ad into 234c4ad051cf4774a450c70fd891f1db(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:33:27,039 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:27,039 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad., storeName=6a931c8e80842c8947954ecd8357e9ad/B, priority=11, startTime=1732617206569; duration=0sec 2024-11-26T10:33:27,039 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:33:27,040 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a931c8e80842c8947954ecd8357e9ad:B 2024-11-26T10:33:27,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741926_1102 (size=12301) 2024-11-26T10:33:27,073 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:27,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617267072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:27,075 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:27,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617267073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:27,079 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/d57d32fc88ea4fd7a1d262e978f2e3f6 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/d57d32fc88ea4fd7a1d262e978f2e3f6 2024-11-26T10:33:27,086 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 6a931c8e80842c8947954ecd8357e9ad/C of 6a931c8e80842c8947954ecd8357e9ad into d57d32fc88ea4fd7a1d262e978f2e3f6(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:33:27,087 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:27,087 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad., storeName=6a931c8e80842c8947954ecd8357e9ad/C, priority=11, startTime=1732617206571; duration=0sec 2024-11-26T10:33:27,087 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:33:27,087 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a931c8e80842c8947954ecd8357e9ad:C 2024-11-26T10:33:27,154 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:27,154 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-26T10:33:27,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 
2024-11-26T10:33:27,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. as already flushing 2024-11-26T10:33:27,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:27,155 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:27,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
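The pid=31 procedure above keeps failing with "Unable to complete flush ... as already flushing" and the master re-dispatches it until the in-progress flush completes. For reference, the client-visible counterpart of that flush request is the Admin API; this is a minimal sketch assuming a standard HBase 2.x client, using the table name from this log (it is not how the test itself drives the flush, which goes through the master procedure shown here).

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RequestFlush {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Ask the cluster to flush every region of the table to HFiles.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }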
2024-11-26T10:33:27,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:27,202 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:27,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33248 deadline: 1732617267201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:27,206 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:27,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617267206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:27,211 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:27,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617267210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:27,307 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:27,307 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-26T10:33:27,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:27,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. as already flushing 2024-11-26T10:33:27,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:27,307 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:27,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:27,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:27,449 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=421 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/463de66ed9d64cd5a66470e442e80541 2024-11-26T10:33:27,460 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:27,460 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-26T10:33:27,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:27,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. as already flushing 2024-11-26T10:33:27,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:27,461 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:33:27,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:27,461 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/290e327985c0494cb04527b2de3f45fb is 50, key is test_row_0/C:col10/1732617206572/Put/seqid=0 2024-11-26T10:33:27,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:27,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-26T10:33:27,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741927_1103 (size=12301) 2024-11-26T10:33:27,493 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=421 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/290e327985c0494cb04527b2de3f45fb 2024-11-26T10:33:27,501 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/858ffa5dd2af4655b52e75d165b10e75 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/858ffa5dd2af4655b52e75d165b10e75 2024-11-26T10:33:27,508 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/858ffa5dd2af4655b52e75d165b10e75, entries=150, sequenceid=421, filesize=12.0 K 2024-11-26T10:33:27,510 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/463de66ed9d64cd5a66470e442e80541 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/463de66ed9d64cd5a66470e442e80541 2024-11-26T10:33:27,519 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/463de66ed9d64cd5a66470e442e80541, entries=150, sequenceid=421, filesize=12.0 K 2024-11-26T10:33:27,521 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/290e327985c0494cb04527b2de3f45fb as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/290e327985c0494cb04527b2de3f45fb 2024-11-26T10:33:27,527 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/290e327985c0494cb04527b2de3f45fb, entries=150, sequenceid=421, filesize=12.0 K 2024-11-26T10:33:27,529 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=26.84 KB/27480 for 6a931c8e80842c8947954ecd8357e9ad in 953ms, sequenceid=421, compaction requested=false 2024-11-26T10:33:27,529 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:27,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 6a931c8e80842c8947954ecd8357e9ad 2024-11-26T10:33:27,585 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6a931c8e80842c8947954ecd8357e9ad 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-26T10:33:27,586 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=A 2024-11-26T10:33:27,587 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:27,587 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=B 2024-11-26T10:33:27,587 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:27,587 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=C 2024-11-26T10:33:27,587 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:27,594 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/6ac35095e42d4a30b83efc8a5bdc2360 is 50, key is test_row_0/A:col10/1732617207584/Put/seqid=0 2024-11-26T10:33:27,612 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:27,612 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-26T10:33:27,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:27,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. as already flushing 2024-11-26T10:33:27,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 
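Once the 174.43 KB flush above completes, another flush is requested almost immediately at 53.67 KB, which is consistent with a deliberately small memstore configured for this test. The knobs behind the "Over memstore limit" check are standard HBase settings; the sketch below names them with their stock defaults, assuming a plain HBase 2.x configuration (the values shown are the defaults for illustration, not what this test run uses, where the resulting blocking limit is 512.0 K).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreLimits {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Flush a region's memstore once it reaches this many bytes (stock default 128 MB).
            conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
            // Reject writes with RegionTooBusyException once the memstore reaches
            // flush.size * multiplier (stock default multiplier 4). The 512.0 K limit in this
            // log suggests the test lowers the flush size to hit that path quickly.
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            long blocking = conf.getLong("hbase.hregion.memstore.flush.size", 0)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 1);
            System.out.println("blocking limit bytes = " + blocking);
        }
    }

With those two settings, the blocking limit printed above is exactly the figure that appears in the RegionTooBusyException messages throughout this section.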
2024-11-26T10:33:27,613 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:27,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:27,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:27,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741928_1104 (size=12297) 2024-11-26T10:33:27,622 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=436 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/6ac35095e42d4a30b83efc8a5bdc2360 2024-11-26T10:33:27,628 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:27,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617267624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:27,628 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:27,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617267625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:27,640 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/f8f4290f6bd849758c52b2e5cbd458f0 is 50, key is test_row_0/B:col10/1732617207584/Put/seqid=0 2024-11-26T10:33:27,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741929_1105 (size=9857) 2024-11-26T10:33:27,654 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=436 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/f8f4290f6bd849758c52b2e5cbd458f0 2024-11-26T10:33:27,665 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/a0f341cab710448eb964a1b9b9f9e25b is 50, key is test_row_0/C:col10/1732617207584/Put/seqid=0 2024-11-26T10:33:27,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741930_1106 (size=9857) 2024-11-26T10:33:27,704 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:27,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33248 deadline: 1732617267704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:27,714 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:27,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617267712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:27,717 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:27,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617267715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:27,731 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:27,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617267731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:27,735 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:27,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617267734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:27,766 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:27,767 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-26T10:33:27,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:27,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. as already flushing 2024-11-26T10:33:27,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:27,767 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:27,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:27,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:27,921 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:27,921 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-26T10:33:27,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:27,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. as already flushing 2024-11-26T10:33:27,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:27,922 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:27,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:27,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:27,935 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:27,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617267933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:27,937 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:27,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617267936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:28,071 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=436 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/a0f341cab710448eb964a1b9b9f9e25b 2024-11-26T10:33:28,074 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:28,074 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-26T10:33:28,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:28,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. as already flushing 2024-11-26T10:33:28,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:28,075 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:28,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:28,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:28,080 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/6ac35095e42d4a30b83efc8a5bdc2360 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/6ac35095e42d4a30b83efc8a5bdc2360 2024-11-26T10:33:28,087 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/6ac35095e42d4a30b83efc8a5bdc2360, entries=150, sequenceid=436, filesize=12.0 K 2024-11-26T10:33:28,088 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/f8f4290f6bd849758c52b2e5cbd458f0 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/f8f4290f6bd849758c52b2e5cbd458f0 2024-11-26T10:33:28,095 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/f8f4290f6bd849758c52b2e5cbd458f0, entries=100, sequenceid=436, filesize=9.6 K 2024-11-26T10:33:28,096 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/a0f341cab710448eb964a1b9b9f9e25b as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/a0f341cab710448eb964a1b9b9f9e25b 2024-11-26T10:33:28,101 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/a0f341cab710448eb964a1b9b9f9e25b, entries=100, sequenceid=436, filesize=9.6 K 2024-11-26T10:33:28,102 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 6a931c8e80842c8947954ecd8357e9ad in 517ms, sequenceid=436, compaction requested=true 2024-11-26T10:33:28,102 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:28,103 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a931c8e80842c8947954ecd8357e9ad:A, priority=-2147483648, current under compaction store size is 1 2024-11-26T10:33:28,103 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:33:28,103 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:33:28,103 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a931c8e80842c8947954ecd8357e9ad:B, priority=-2147483648, current under compaction store size is 2 2024-11-26T10:33:28,103 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:33:28,103 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:33:28,103 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a931c8e80842c8947954ecd8357e9ad:C, priority=-2147483648, current under compaction store size is 3 2024-11-26T10:33:28,103 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:33:28,104 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37853 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:33:28,104 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35413 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:33:28,104 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): 6a931c8e80842c8947954ecd8357e9ad/A is initiating minor compaction (all files) 2024-11-26T10:33:28,104 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): 6a931c8e80842c8947954ecd8357e9ad/B is initiating minor compaction (all files) 2024-11-26T10:33:28,104 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a931c8e80842c8947954ecd8357e9ad/A in TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:28,104 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a931c8e80842c8947954ecd8357e9ad/B in TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 
2024-11-26T10:33:28,104 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/895142e2f0d3499c9f5e77456e999b15, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/858ffa5dd2af4655b52e75d165b10e75, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/6ac35095e42d4a30b83efc8a5bdc2360] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp, totalSize=37.0 K 2024-11-26T10:33:28,104 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/234c4ad051cf4774a450c70fd891f1db, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/463de66ed9d64cd5a66470e442e80541, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/f8f4290f6bd849758c52b2e5cbd458f0] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp, totalSize=34.6 K 2024-11-26T10:33:28,105 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 895142e2f0d3499c9f5e77456e999b15, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=392, earliestPutTs=1732617206420 2024-11-26T10:33:28,105 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 234c4ad051cf4774a450c70fd891f1db, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=392, earliestPutTs=1732617206420 2024-11-26T10:33:28,105 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 858ffa5dd2af4655b52e75d165b10e75, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=421, earliestPutTs=1732617206455 2024-11-26T10:33:28,105 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 463de66ed9d64cd5a66470e442e80541, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=421, earliestPutTs=1732617206455 2024-11-26T10:33:28,105 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6ac35095e42d4a30b83efc8a5bdc2360, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=436, earliestPutTs=1732617207579 2024-11-26T10:33:28,106 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting f8f4290f6bd849758c52b2e5cbd458f0, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=436, earliestPutTs=1732617207583 2024-11-26T10:33:28,116 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a931c8e80842c8947954ecd8357e9ad#B#compaction#92 average throughput is 6.55 MB/second, slept 0 time(s) and total 
slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:33:28,117 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/f3e8b446fde8401188984e8dabbc20a8 is 50, key is test_row_0/B:col10/1732617207584/Put/seqid=0 2024-11-26T10:33:28,120 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a931c8e80842c8947954ecd8357e9ad#A#compaction#93 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:33:28,123 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/00d528e38e2643db8651344842949d80 is 50, key is test_row_0/A:col10/1732617207584/Put/seqid=0 2024-11-26T10:33:28,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741931_1107 (size=13357) 2024-11-26T10:33:28,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741932_1108 (size=13357) 2024-11-26T10:33:28,229 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:28,230 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-26T10:33:28,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 
2024-11-26T10:33:28,230 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2837): Flushing 6a931c8e80842c8947954ecd8357e9ad 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-26T10:33:28,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=A 2024-11-26T10:33:28,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:28,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=B 2024-11-26T10:33:28,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:28,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=C 2024-11-26T10:33:28,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:28,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/8ab2b2b3e9da4efbb978ba131a4649e2 is 50, key is test_row_0/A:col10/1732617207613/Put/seqid=0 2024-11-26T10:33:28,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 6a931c8e80842c8947954ecd8357e9ad 2024-11-26T10:33:28,239 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. as already flushing 2024-11-26T10:33:28,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741933_1109 (size=12301) 2024-11-26T10:33:28,250 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:28,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617268248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:28,252 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:28,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617268251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:28,352 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:28,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617268351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:28,354 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:28,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617268353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:28,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-26T10:33:28,534 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/f3e8b446fde8401188984e8dabbc20a8 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/f3e8b446fde8401188984e8dabbc20a8 2024-11-26T10:33:28,542 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/00d528e38e2643db8651344842949d80 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/00d528e38e2643db8651344842949d80 2024-11-26T10:33:28,544 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6a931c8e80842c8947954ecd8357e9ad/B of 6a931c8e80842c8947954ecd8357e9ad into f3e8b446fde8401188984e8dabbc20a8(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-26T10:33:28,544 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:28,544 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad., storeName=6a931c8e80842c8947954ecd8357e9ad/B, priority=13, startTime=1732617208103; duration=0sec 2024-11-26T10:33:28,547 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:33:28,547 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a931c8e80842c8947954ecd8357e9ad:B 2024-11-26T10:33:28,547 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:33:28,549 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35379 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:33:28,549 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): 6a931c8e80842c8947954ecd8357e9ad/C is initiating minor compaction (all files) 2024-11-26T10:33:28,549 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a931c8e80842c8947954ecd8357e9ad/C in TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:28,549 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/d57d32fc88ea4fd7a1d262e978f2e3f6, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/290e327985c0494cb04527b2de3f45fb, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/a0f341cab710448eb964a1b9b9f9e25b] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp, totalSize=34.5 K 2024-11-26T10:33:28,550 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting d57d32fc88ea4fd7a1d262e978f2e3f6, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=392, earliestPutTs=1732617206420 2024-11-26T10:33:28,551 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 290e327985c0494cb04527b2de3f45fb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=421, earliestPutTs=1732617206455 2024-11-26T10:33:28,551 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6a931c8e80842c8947954ecd8357e9ad/A of 6a931c8e80842c8947954ecd8357e9ad into 00d528e38e2643db8651344842949d80(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-26T10:33:28,551 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting a0f341cab710448eb964a1b9b9f9e25b, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=436, earliestPutTs=1732617207583 2024-11-26T10:33:28,551 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:28,551 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad., storeName=6a931c8e80842c8947954ecd8357e9ad/A, priority=13, startTime=1732617208103; duration=0sec 2024-11-26T10:33:28,551 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:33:28,551 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a931c8e80842c8947954ecd8357e9ad:A 2024-11-26T10:33:28,554 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:28,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617268554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:28,558 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:28,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617268558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:28,565 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a931c8e80842c8947954ecd8357e9ad#C#compaction#95 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:33:28,566 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/18050d6399b74bd0a59fc07358f7b6b7 is 50, key is test_row_0/C:col10/1732617207584/Put/seqid=0 2024-11-26T10:33:28,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741934_1110 (size=13323) 2024-11-26T10:33:28,653 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=460 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/8ab2b2b3e9da4efbb978ba131a4649e2 2024-11-26T10:33:28,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/ffafd61ae6634365bcc350fbd90187cc is 50, key is test_row_0/B:col10/1732617207613/Put/seqid=0 2024-11-26T10:33:28,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741935_1111 (size=12301) 2024-11-26T10:33:28,670 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at 
sequenceid=460 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/ffafd61ae6634365bcc350fbd90187cc 2024-11-26T10:33:28,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/5df75b3c7c524d278476a1aebe06869e is 50, key is test_row_0/C:col10/1732617207613/Put/seqid=0 2024-11-26T10:33:28,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741936_1112 (size=12301) 2024-11-26T10:33:28,685 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=460 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/5df75b3c7c524d278476a1aebe06869e 2024-11-26T10:33:28,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/8ab2b2b3e9da4efbb978ba131a4649e2 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/8ab2b2b3e9da4efbb978ba131a4649e2 2024-11-26T10:33:28,699 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/8ab2b2b3e9da4efbb978ba131a4649e2, entries=150, sequenceid=460, filesize=12.0 K 2024-11-26T10:33:28,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/ffafd61ae6634365bcc350fbd90187cc as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/ffafd61ae6634365bcc350fbd90187cc 2024-11-26T10:33:28,709 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/ffafd61ae6634365bcc350fbd90187cc, entries=150, sequenceid=460, filesize=12.0 K 2024-11-26T10:33:28,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/5df75b3c7c524d278476a1aebe06869e as 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/5df75b3c7c524d278476a1aebe06869e 2024-11-26T10:33:28,712 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:28,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33248 deadline: 1732617268710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:28,718 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/5df75b3c7c524d278476a1aebe06869e, entries=150, sequenceid=460, filesize=12.0 K 2024-11-26T10:33:28,720 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 6a931c8e80842c8947954ecd8357e9ad in 490ms, sequenceid=460, compaction requested=false 2024-11-26T10:33:28,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2538): Flush status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:28,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 
2024-11-26T10:33:28,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=31 2024-11-26T10:33:28,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4106): Remote procedure done, pid=31 2024-11-26T10:33:28,723 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=31, resume processing ppid=30 2024-11-26T10:33:28,723 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, ppid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3320 sec 2024-11-26T10:33:28,725 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees in 2.3370 sec 2024-11-26T10:33:28,725 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6a931c8e80842c8947954ecd8357e9ad 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-26T10:33:28,726 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=A 2024-11-26T10:33:28,726 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:28,726 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=B 2024-11-26T10:33:28,726 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:28,726 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=C 2024-11-26T10:33:28,726 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:28,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 6a931c8e80842c8947954ecd8357e9ad 2024-11-26T10:33:28,734 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/81109afc88ab4a9184f8702d5424f6ee is 50, key is test_row_0/A:col10/1732617208247/Put/seqid=0 2024-11-26T10:33:28,765 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:28,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617268761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:28,766 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:28,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617268761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:28,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741937_1113 (size=9857) 2024-11-26T10:33:28,769 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=475 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/81109afc88ab4a9184f8702d5424f6ee 2024-11-26T10:33:28,780 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/7772fca187294aa8ba22d41b257549eb is 50, key is test_row_0/B:col10/1732617208247/Put/seqid=0 2024-11-26T10:33:28,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741938_1114 (size=9857) 2024-11-26T10:33:28,858 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:28,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617268856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:28,861 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:28,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617268860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:28,867 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:28,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617268866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:28,869 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:28,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617268867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:28,987 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/18050d6399b74bd0a59fc07358f7b6b7 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/18050d6399b74bd0a59fc07358f7b6b7 2024-11-26T10:33:28,994 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6a931c8e80842c8947954ecd8357e9ad/C of 6a931c8e80842c8947954ecd8357e9ad into 18050d6399b74bd0a59fc07358f7b6b7(size=13.0 K), total size for store is 25.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:33:28,994 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:28,995 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad., storeName=6a931c8e80842c8947954ecd8357e9ad/C, priority=13, startTime=1732617208103; duration=0sec 2024-11-26T10:33:28,995 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:33:28,995 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a931c8e80842c8947954ecd8357e9ad:C 2024-11-26T10:33:29,069 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:29,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617269068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:29,071 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:29,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617269070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:29,189 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=475 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/7772fca187294aa8ba22d41b257549eb 2024-11-26T10:33:29,197 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/cf1ce271423c49069168fa868877a4f7 is 50, key is test_row_0/C:col10/1732617208247/Put/seqid=0 2024-11-26T10:33:29,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741939_1115 (size=9857) 2024-11-26T10:33:29,362 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:29,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617269360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:29,363 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:29,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617269363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:29,371 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:29,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617269370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:29,375 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:29,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617269374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:29,603 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=475 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/cf1ce271423c49069168fa868877a4f7 2024-11-26T10:33:29,608 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/81109afc88ab4a9184f8702d5424f6ee as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/81109afc88ab4a9184f8702d5424f6ee 2024-11-26T10:33:29,614 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/81109afc88ab4a9184f8702d5424f6ee, entries=100, sequenceid=475, filesize=9.6 K 2024-11-26T10:33:29,615 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/7772fca187294aa8ba22d41b257549eb as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/7772fca187294aa8ba22d41b257549eb 2024-11-26T10:33:29,622 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/7772fca187294aa8ba22d41b257549eb, entries=100, sequenceid=475, filesize=9.6 K 2024-11-26T10:33:29,624 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/cf1ce271423c49069168fa868877a4f7 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/cf1ce271423c49069168fa868877a4f7 2024-11-26T10:33:29,629 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/cf1ce271423c49069168fa868877a4f7, entries=100, sequenceid=475, filesize=9.6 K 2024-11-26T10:33:29,631 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 6a931c8e80842c8947954ecd8357e9ad in 906ms, sequenceid=475, compaction requested=true 2024-11-26T10:33:29,631 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:29,631 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a931c8e80842c8947954ecd8357e9ad:A, priority=-2147483648, current under compaction store size is 1 2024-11-26T10:33:29,631 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:33:29,631 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:33:29,631 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:33:29,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a931c8e80842c8947954ecd8357e9ad:B, priority=-2147483648, current under compaction store size is 2 2024-11-26T10:33:29,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:33:29,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a931c8e80842c8947954ecd8357e9ad:C, priority=-2147483648, current under compaction store size is 3 2024-11-26T10:33:29,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:33:29,633 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35515 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:33:29,633 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): 6a931c8e80842c8947954ecd8357e9ad/B is initiating minor compaction (all files) 2024-11-26T10:33:29,633 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a931c8e80842c8947954ecd8357e9ad/B in TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 
2024-11-26T10:33:29,633 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/f3e8b446fde8401188984e8dabbc20a8, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/ffafd61ae6634365bcc350fbd90187cc, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/7772fca187294aa8ba22d41b257549eb] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp, totalSize=34.7 K 2024-11-26T10:33:29,634 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35515 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:33:29,634 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): 6a931c8e80842c8947954ecd8357e9ad/A is initiating minor compaction (all files) 2024-11-26T10:33:29,634 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a931c8e80842c8947954ecd8357e9ad/A in TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:29,634 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/00d528e38e2643db8651344842949d80, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/8ab2b2b3e9da4efbb978ba131a4649e2, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/81109afc88ab4a9184f8702d5424f6ee] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp, totalSize=34.7 K 2024-11-26T10:33:29,634 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting f3e8b446fde8401188984e8dabbc20a8, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=436, earliestPutTs=1732617206569 2024-11-26T10:33:29,635 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 00d528e38e2643db8651344842949d80, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=436, earliestPutTs=1732617206569 2024-11-26T10:33:29,635 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting ffafd61ae6634365bcc350fbd90187cc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=460, earliestPutTs=1732617207613 2024-11-26T10:33:29,635 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8ab2b2b3e9da4efbb978ba131a4649e2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=460, earliestPutTs=1732617207613 2024-11-26T10:33:29,635 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 7772fca187294aa8ba22d41b257549eb, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=475, earliestPutTs=1732617208247 2024-11-26T10:33:29,636 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 81109afc88ab4a9184f8702d5424f6ee, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=475, earliestPutTs=1732617208247 2024-11-26T10:33:29,648 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a931c8e80842c8947954ecd8357e9ad#B#compaction#101 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:33:29,648 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/18392aab8ccd40839d41bc2529e84618 is 50, key is test_row_0/B:col10/1732617208247/Put/seqid=0 2024-11-26T10:33:29,653 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a931c8e80842c8947954ecd8357e9ad#A#compaction#102 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:33:29,653 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/30669ddb4045465689a655502cd1ce02 is 50, key is test_row_0/A:col10/1732617208247/Put/seqid=0 2024-11-26T10:33:29,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741940_1116 (size=13459) 2024-11-26T10:33:29,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741941_1117 (size=13459) 2024-11-26T10:33:29,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 6a931c8e80842c8947954ecd8357e9ad 2024-11-26T10:33:29,875 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6a931c8e80842c8947954ecd8357e9ad 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-26T10:33:29,875 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=A 2024-11-26T10:33:29,875 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:29,875 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=B 2024-11-26T10:33:29,875 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:29,875 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=C 2024-11-26T10:33:29,875 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:29,880 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/3bfe3cb996254638adcbff9191a44c26 is 50, key is test_row_0/A:col10/1732617208760/Put/seqid=0 2024-11-26T10:33:29,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741942_1118 (size=14741) 2024-11-26T10:33:29,914 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:29,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617269912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:29,914 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:29,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617269912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:30,017 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:30,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617270015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:30,017 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:30,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617270015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:30,081 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/18392aab8ccd40839d41bc2529e84618 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/18392aab8ccd40839d41bc2529e84618 2024-11-26T10:33:30,088 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6a931c8e80842c8947954ecd8357e9ad/B of 6a931c8e80842c8947954ecd8357e9ad into 18392aab8ccd40839d41bc2529e84618(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-26T10:33:30,088 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:30,088 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad., storeName=6a931c8e80842c8947954ecd8357e9ad/B, priority=13, startTime=1732617209631; duration=0sec 2024-11-26T10:33:30,088 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:33:30,088 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a931c8e80842c8947954ecd8357e9ad:B 2024-11-26T10:33:30,089 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:33:30,091 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35481 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:33:30,092 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): 6a931c8e80842c8947954ecd8357e9ad/C is initiating minor compaction (all files) 2024-11-26T10:33:30,092 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a931c8e80842c8947954ecd8357e9ad/C in TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:30,092 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/18050d6399b74bd0a59fc07358f7b6b7, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/5df75b3c7c524d278476a1aebe06869e, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/cf1ce271423c49069168fa868877a4f7] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp, totalSize=34.6 K 2024-11-26T10:33:30,093 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 18050d6399b74bd0a59fc07358f7b6b7, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=436, earliestPutTs=1732617206569 2024-11-26T10:33:30,093 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 5df75b3c7c524d278476a1aebe06869e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=460, earliestPutTs=1732617207613 2024-11-26T10:33:30,094 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting cf1ce271423c49069168fa868877a4f7, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=475, earliestPutTs=1732617208247 2024-11-26T10:33:30,095 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/30669ddb4045465689a655502cd1ce02 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/30669ddb4045465689a655502cd1ce02 2024-11-26T10:33:30,101 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6a931c8e80842c8947954ecd8357e9ad/A of 6a931c8e80842c8947954ecd8357e9ad into 30669ddb4045465689a655502cd1ce02(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:33:30,102 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:30,102 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad., storeName=6a931c8e80842c8947954ecd8357e9ad/A, priority=13, startTime=1732617209631; duration=0sec 2024-11-26T10:33:30,102 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:33:30,102 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a931c8e80842c8947954ecd8357e9ad:A 2024-11-26T10:33:30,116 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a931c8e80842c8947954ecd8357e9ad#C#compaction#104 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:33:30,117 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/852eb318e4d74c25919bb5aa2f8e7fce is 50, key is test_row_0/C:col10/1732617208247/Put/seqid=0 2024-11-26T10:33:30,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741943_1119 (size=13425) 2024-11-26T10:33:30,221 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:30,221 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:30,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617270218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:30,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617270219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:30,291 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=500 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/3bfe3cb996254638adcbff9191a44c26 2024-11-26T10:33:30,301 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/626905ace789471a91581afb0682b31e is 50, key is 
test_row_0/B:col10/1732617208760/Put/seqid=0 2024-11-26T10:33:30,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741944_1120 (size=12301) 2024-11-26T10:33:30,370 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:30,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617270368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:30,375 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:30,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617270373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:30,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-26T10:33:30,493 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 30 completed 2024-11-26T10:33:30,494 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-26T10:33:30,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=32, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees 2024-11-26T10:33:30,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-26T10:33:30,496 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=32, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-26T10:33:30,496 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=32, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-26T10:33:30,497 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-26T10:33:30,525 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:30,525 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:30,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617270523, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:30,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617270523, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:30,591 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/852eb318e4d74c25919bb5aa2f8e7fce as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/852eb318e4d74c25919bb5aa2f8e7fce 2024-11-26T10:33:30,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-26T10:33:30,598 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6a931c8e80842c8947954ecd8357e9ad/C of 6a931c8e80842c8947954ecd8357e9ad into 852eb318e4d74c25919bb5aa2f8e7fce(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:33:30,598 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:30,598 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad., storeName=6a931c8e80842c8947954ecd8357e9ad/C, priority=13, startTime=1732617209632; duration=0sec 2024-11-26T10:33:30,598 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:33:30,598 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a931c8e80842c8947954ecd8357e9ad:C 2024-11-26T10:33:30,648 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:30,648 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-26T10:33:30,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:30,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. as already flushing 2024-11-26T10:33:30,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:30,649 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:30,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:30,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:30,713 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=500 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/626905ace789471a91581afb0682b31e 2024-11-26T10:33:30,721 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/130adbcb49fe47e4ac3c6f0c44d6b793 is 50, key is test_row_0/C:col10/1732617208760/Put/seqid=0 2024-11-26T10:33:30,723 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:30,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33248 deadline: 1732617270723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:30,724 DEBUG [Thread-153 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4139 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad., hostname=ccf62758a0a5,45419,1732617185877, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-26T10:33:30,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741945_1121 (size=12301) 2024-11-26T10:33:30,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-26T10:33:30,801 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:30,802 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure 
class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-26T10:33:30,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:30,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. as already flushing 2024-11-26T10:33:30,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:30,802 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:30,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:33:30,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:30,954 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:30,955 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-26T10:33:30,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:30,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 
as already flushing 2024-11-26T10:33:30,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:30,955 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:30,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:30,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:31,029 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:31,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617271028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:31,030 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:31,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617271030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:31,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-26T10:33:31,107 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:31,107 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-26T10:33:31,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:31,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. as already flushing 2024-11-26T10:33:31,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:31,108 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:33:31,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:31,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:31,128 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=500 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/130adbcb49fe47e4ac3c6f0c44d6b793 2024-11-26T10:33:31,133 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/3bfe3cb996254638adcbff9191a44c26 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/3bfe3cb996254638adcbff9191a44c26 2024-11-26T10:33:31,138 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/3bfe3cb996254638adcbff9191a44c26, entries=200, sequenceid=500, filesize=14.4 K 2024-11-26T10:33:31,139 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/626905ace789471a91581afb0682b31e as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/626905ace789471a91581afb0682b31e 2024-11-26T10:33:31,145 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/626905ace789471a91581afb0682b31e, entries=150, sequenceid=500, filesize=12.0 K 2024-11-26T10:33:31,146 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/130adbcb49fe47e4ac3c6f0c44d6b793 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/130adbcb49fe47e4ac3c6f0c44d6b793 2024-11-26T10:33:31,152 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/130adbcb49fe47e4ac3c6f0c44d6b793, entries=150, sequenceid=500, filesize=12.0 K 2024-11-26T10:33:31,153 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 6a931c8e80842c8947954ecd8357e9ad in 1278ms, sequenceid=500, compaction requested=false 2024-11-26T10:33:31,153 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:31,259 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 
ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:31,260 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-26T10:33:31,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:31,260 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2837): Flushing 6a931c8e80842c8947954ecd8357e9ad 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-26T10:33:31,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=A 2024-11-26T10:33:31,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:31,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=B 2024-11-26T10:33:31,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:31,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=C 2024-11-26T10:33:31,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:31,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/704294cab77944558f8ab0b1ca519e66 is 50, key is test_row_0/A:col10/1732617209896/Put/seqid=0 2024-11-26T10:33:31,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741946_1122 (size=12301) 2024-11-26T10:33:31,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-26T10:33:31,670 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=515 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/704294cab77944558f8ab0b1ca519e66 2024-11-26T10:33:31,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/6cad3177f9d341fdb1db0f74dd4c60f5 is 50, key is test_row_0/B:col10/1732617209896/Put/seqid=0 2024-11-26T10:33:31,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741947_1123 (size=12301) 2024-11-26T10:33:31,713 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=515 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/6cad3177f9d341fdb1db0f74dd4c60f5 2024-11-26T10:33:31,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/6ab8a55f8d6048489d0d878eff2d758c is 50, key is test_row_0/C:col10/1732617209896/Put/seqid=0 2024-11-26T10:33:31,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741948_1124 (size=12301) 2024-11-26T10:33:31,730 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=515 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/6ab8a55f8d6048489d0d878eff2d758c 2024-11-26T10:33:31,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/704294cab77944558f8ab0b1ca519e66 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/704294cab77944558f8ab0b1ca519e66 2024-11-26T10:33:31,743 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/704294cab77944558f8ab0b1ca519e66, entries=150, sequenceid=515, filesize=12.0 K 2024-11-26T10:33:31,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/6cad3177f9d341fdb1db0f74dd4c60f5 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/6cad3177f9d341fdb1db0f74dd4c60f5 2024-11-26T10:33:31,751 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/6cad3177f9d341fdb1db0f74dd4c60f5, entries=150, sequenceid=515, filesize=12.0 K 2024-11-26T10:33:31,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/6ab8a55f8d6048489d0d878eff2d758c as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/6ab8a55f8d6048489d0d878eff2d758c 2024-11-26T10:33:31,758 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/6ab8a55f8d6048489d0d878eff2d758c, entries=150, sequenceid=515, filesize=12.0 K 2024-11-26T10:33:31,759 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=0 B/0 for 6a931c8e80842c8947954ecd8357e9ad in 499ms, sequenceid=515, compaction requested=true 2024-11-26T10:33:31,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2538): Flush status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:31,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 
2024-11-26T10:33:31,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=33 2024-11-26T10:33:31,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4106): Remote procedure done, pid=33 2024-11-26T10:33:31,769 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=33, resume processing ppid=32 2024-11-26T10:33:31,769 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, ppid=32, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2650 sec 2024-11-26T10:33:31,771 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees in 1.2750 sec 2024-11-26T10:33:32,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 6a931c8e80842c8947954ecd8357e9ad 2024-11-26T10:33:32,040 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6a931c8e80842c8947954ecd8357e9ad 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-26T10:33:32,040 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=A 2024-11-26T10:33:32,040 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:32,040 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=B 2024-11-26T10:33:32,040 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:32,040 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=C 2024-11-26T10:33:32,040 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:32,044 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/ae1b9b5112ba4807ab9fd59039cad2d4 is 50, key is test_row_0/A:col10/1732617212038/Put/seqid=0 2024-11-26T10:33:32,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741949_1125 (size=14741) 2024-11-26T10:33:32,052 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=526 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/ae1b9b5112ba4807ab9fd59039cad2d4 2024-11-26T10:33:32,065 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/828fca2c8c60452ab050320e35723a82 is 50, key is test_row_0/B:col10/1732617212038/Put/seqid=0 2024-11-26T10:33:32,078 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size 
limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:32,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617272077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:32,079 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:32,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617272077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:32,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741950_1126 (size=12301) 2024-11-26T10:33:32,182 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:32,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617272180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:32,183 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:32,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617272180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:32,379 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:32,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33192 deadline: 1732617272379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:32,380 DEBUG [Thread-155 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4130 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad., hostname=ccf62758a0a5,45419,1732617185877, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-26T10:33:32,383 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:32,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33228 deadline: 1732617272382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:32,384 DEBUG [Thread-149 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4135 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad., hostname=ccf62758a0a5,45419,1732617185877, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-26T10:33:32,386 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:32,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617272384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:32,386 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:32,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617272384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:32,486 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=526 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/828fca2c8c60452ab050320e35723a82 2024-11-26T10:33:32,494 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/e3f2bb8beb0f4986b90f022909b93133 is 50, key is test_row_0/C:col10/1732617212038/Put/seqid=0 2024-11-26T10:33:32,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741951_1127 (size=12301) 2024-11-26T10:33:32,500 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=526 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/e3f2bb8beb0f4986b90f022909b93133 2024-11-26T10:33:32,504 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/ae1b9b5112ba4807ab9fd59039cad2d4 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/ae1b9b5112ba4807ab9fd59039cad2d4 2024-11-26T10:33:32,509 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/ae1b9b5112ba4807ab9fd59039cad2d4, entries=200, sequenceid=526, filesize=14.4 K 2024-11-26T10:33:32,510 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/828fca2c8c60452ab050320e35723a82 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/828fca2c8c60452ab050320e35723a82 2024-11-26T10:33:32,514 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/828fca2c8c60452ab050320e35723a82, entries=150, sequenceid=526, filesize=12.0 K 2024-11-26T10:33:32,515 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/e3f2bb8beb0f4986b90f022909b93133 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/e3f2bb8beb0f4986b90f022909b93133 2024-11-26T10:33:32,520 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/e3f2bb8beb0f4986b90f022909b93133, entries=150, sequenceid=526, filesize=12.0 K 2024-11-26T10:33:32,521 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 6a931c8e80842c8947954ecd8357e9ad in 480ms, sequenceid=526, compaction requested=true 2024-11-26T10:33:32,521 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:32,521 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a931c8e80842c8947954ecd8357e9ad:A, priority=-2147483648, current under compaction store size is 1 2024-11-26T10:33:32,521 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-26T10:33:32,521 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:33:32,521 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a931c8e80842c8947954ecd8357e9ad:B, priority=-2147483648, current under compaction store size is 2 2024-11-26T10:33:32,521 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:33:32,521 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-26T10:33:32,521 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a931c8e80842c8947954ecd8357e9ad:C, priority=-2147483648, current under compaction store size is 3 2024-11-26T10:33:32,521 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:33:32,522 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 55242 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-26T10:33:32,522 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): 
6a931c8e80842c8947954ecd8357e9ad/A is initiating minor compaction (all files) 2024-11-26T10:33:32,522 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a931c8e80842c8947954ecd8357e9ad/A in TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:32,523 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50362 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-26T10:33:32,523 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/30669ddb4045465689a655502cd1ce02, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/3bfe3cb996254638adcbff9191a44c26, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/704294cab77944558f8ab0b1ca519e66, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/ae1b9b5112ba4807ab9fd59039cad2d4] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp, totalSize=53.9 K 2024-11-26T10:33:32,523 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): 6a931c8e80842c8947954ecd8357e9ad/B is initiating minor compaction (all files) 2024-11-26T10:33:32,523 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a931c8e80842c8947954ecd8357e9ad/B in TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 
2024-11-26T10:33:32,523 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/18392aab8ccd40839d41bc2529e84618, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/626905ace789471a91581afb0682b31e, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/6cad3177f9d341fdb1db0f74dd4c60f5, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/828fca2c8c60452ab050320e35723a82] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp, totalSize=49.2 K 2024-11-26T10:33:32,523 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 30669ddb4045465689a655502cd1ce02, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=475, earliestPutTs=1732617207622 2024-11-26T10:33:32,523 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3bfe3cb996254638adcbff9191a44c26, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=500, earliestPutTs=1732617208748 2024-11-26T10:33:32,523 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 18392aab8ccd40839d41bc2529e84618, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=475, earliestPutTs=1732617207622 2024-11-26T10:33:32,524 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 704294cab77944558f8ab0b1ca519e66, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=515, earliestPutTs=1732617209887 2024-11-26T10:33:32,524 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 626905ace789471a91581afb0682b31e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=500, earliestPutTs=1732617208759 2024-11-26T10:33:32,524 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting ae1b9b5112ba4807ab9fd59039cad2d4, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=526, earliestPutTs=1732617212034 2024-11-26T10:33:32,524 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 6cad3177f9d341fdb1db0f74dd4c60f5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=515, earliestPutTs=1732617209887 2024-11-26T10:33:32,525 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 828fca2c8c60452ab050320e35723a82, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=526, earliestPutTs=1732617212034 2024-11-26T10:33:32,534 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a931c8e80842c8947954ecd8357e9ad#A#compaction#113 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:33:32,534 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a931c8e80842c8947954ecd8357e9ad#B#compaction#114 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:33:32,535 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/58e1295a303946bc8e77ff608fa33a85 is 50, key is test_row_0/A:col10/1732617212038/Put/seqid=0 2024-11-26T10:33:32,535 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/db910c7f3cae44d3aa34c6b5aa74851f is 50, key is test_row_0/B:col10/1732617212038/Put/seqid=0 2024-11-26T10:33:32,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741952_1128 (size=13595) 2024-11-26T10:33:32,545 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/58e1295a303946bc8e77ff608fa33a85 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/58e1295a303946bc8e77ff608fa33a85 2024-11-26T10:33:32,550 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6a931c8e80842c8947954ecd8357e9ad/A of 6a931c8e80842c8947954ecd8357e9ad into 58e1295a303946bc8e77ff608fa33a85(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-26T10:33:32,550 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:32,550 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad., storeName=6a931c8e80842c8947954ecd8357e9ad/A, priority=12, startTime=1732617212521; duration=0sec 2024-11-26T10:33:32,551 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:33:32,551 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a931c8e80842c8947954ecd8357e9ad:A 2024-11-26T10:33:32,551 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-26T10:33:32,552 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50328 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-26T10:33:32,552 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): 6a931c8e80842c8947954ecd8357e9ad/C is initiating minor compaction (all files) 2024-11-26T10:33:32,552 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a931c8e80842c8947954ecd8357e9ad/C in TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:32,553 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/852eb318e4d74c25919bb5aa2f8e7fce, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/130adbcb49fe47e4ac3c6f0c44d6b793, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/6ab8a55f8d6048489d0d878eff2d758c, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/e3f2bb8beb0f4986b90f022909b93133] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp, totalSize=49.1 K 2024-11-26T10:33:32,553 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 852eb318e4d74c25919bb5aa2f8e7fce, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=475, earliestPutTs=1732617207622 2024-11-26T10:33:32,553 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 130adbcb49fe47e4ac3c6f0c44d6b793, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=500, earliestPutTs=1732617208759 2024-11-26T10:33:32,554 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6ab8a55f8d6048489d0d878eff2d758c, keycount=150, bloomtype=ROW, size=12.0 K, 
encoding=NONE, compression=NONE, seqNum=515, earliestPutTs=1732617209887 2024-11-26T10:33:32,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741953_1129 (size=13595) 2024-11-26T10:33:32,555 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting e3f2bb8beb0f4986b90f022909b93133, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=526, earliestPutTs=1732617212034 2024-11-26T10:33:32,560 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/db910c7f3cae44d3aa34c6b5aa74851f as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/db910c7f3cae44d3aa34c6b5aa74851f 2024-11-26T10:33:32,566 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a931c8e80842c8947954ecd8357e9ad#C#compaction#115 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:33:32,566 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6a931c8e80842c8947954ecd8357e9ad/B of 6a931c8e80842c8947954ecd8357e9ad into db910c7f3cae44d3aa34c6b5aa74851f(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:33:32,566 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:32,566 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad., storeName=6a931c8e80842c8947954ecd8357e9ad/B, priority=12, startTime=1732617212521; duration=0sec 2024-11-26T10:33:32,566 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:33:32,567 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a931c8e80842c8947954ecd8357e9ad:B 2024-11-26T10:33:32,567 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/fcbe5f18193741448fc1a22c146d5df3 is 50, key is test_row_0/C:col10/1732617212038/Put/seqid=0 2024-11-26T10:33:32,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741954_1130 (size=13561) 2024-11-26T10:33:32,578 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/fcbe5f18193741448fc1a22c146d5df3 as 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/fcbe5f18193741448fc1a22c146d5df3 2024-11-26T10:33:32,583 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6a931c8e80842c8947954ecd8357e9ad/C of 6a931c8e80842c8947954ecd8357e9ad into fcbe5f18193741448fc1a22c146d5df3(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:33:32,583 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:32,583 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad., storeName=6a931c8e80842c8947954ecd8357e9ad/C, priority=12, startTime=1732617212521; duration=0sec 2024-11-26T10:33:32,583 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:33:32,583 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a931c8e80842c8947954ecd8357e9ad:C 2024-11-26T10:33:32,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-26T10:33:32,599 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 32 completed 2024-11-26T10:33:32,600 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-26T10:33:32,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=34, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=34, table=TestAcidGuarantees 2024-11-26T10:33:32,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-26T10:33:32,601 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=34, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=34, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-26T10:33:32,602 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=34, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=34, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-26T10:33:32,602 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=35, ppid=34, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-26T10:33:32,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 6a931c8e80842c8947954ecd8357e9ad 2024-11-26T10:33:32,689 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6a931c8e80842c8947954ecd8357e9ad 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-26T10:33:32,689 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=A 2024-11-26T10:33:32,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:32,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=B 2024-11-26T10:33:32,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:32,690 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=C 2024-11-26T10:33:32,690 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:32,694 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/701fb88b34a545099f03d1c3fcae3477 is 50, key is test_row_0/A:col10/1732617212066/Put/seqid=0 2024-11-26T10:33:32,698 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:32,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617272696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:32,699 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:32,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617272698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:32,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741955_1131 (size=14741) 2024-11-26T10:33:32,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-26T10:33:32,753 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:32,753 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=35 2024-11-26T10:33:32,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:32,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. as already flushing 2024-11-26T10:33:32,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:32,754 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] handler.RSProcedureHandler(58): pid=35 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:32,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=35 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:32,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=35 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:32,801 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:32,802 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:32,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617272799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:32,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617272799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:32,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-26T10:33:32,906 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:32,906 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=35 2024-11-26T10:33:32,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:32,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. as already flushing 2024-11-26T10:33:32,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 
2024-11-26T10:33:32,906 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] handler.RSProcedureHandler(58): pid=35 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:32,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=35 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:32,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=35 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:33,003 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:33,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617273003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:33,004 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:33,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617273004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:33,058 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:33,059 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=35 2024-11-26T10:33:33,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 
2024-11-26T10:33:33,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. as already flushing 2024-11-26T10:33:33,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:33,059 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] handler.RSProcedureHandler(58): pid=35 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:33,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=35 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:33:33,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=35 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:33:33,102 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=555 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/701fb88b34a545099f03d1c3fcae3477 2024-11-26T10:33:33,111 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/0290077a6659470faf6591fdff5edd9f is 50, key is test_row_0/B:col10/1732617212066/Put/seqid=0 2024-11-26T10:33:33,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741956_1132 (size=12301) 2024-11-26T10:33:33,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-26T10:33:33,211 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:33,211 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=35 2024-11-26T10:33:33,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:33,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. as already flushing 2024-11-26T10:33:33,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:33,212 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] handler.RSProcedureHandler(58): pid=35 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:33:33,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=35 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:33,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=35 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:33,306 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:33,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 243 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617273304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:33,306 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:33,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617273305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:33,363 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:33,364 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=35 2024-11-26T10:33:33,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:33,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. as already flushing 2024-11-26T10:33:33,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:33,364 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] handler.RSProcedureHandler(58): pid=35 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:33:33,365 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=35 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:33,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=35 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:33,516 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=555 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/0290077a6659470faf6591fdff5edd9f 2024-11-26T10:33:33,517 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:33,517 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=35 2024-11-26T10:33:33,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:33,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. as already flushing 2024-11-26T10:33:33,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:33,518 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] handler.RSProcedureHandler(58): pid=35 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:33,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=35 java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:33,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=35 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
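The repeating pattern above — FlushRegionCallable throwing "Unable to complete flush", the master logging "Remote procedure failed, pid=35", and the same pid being executed again a few lines later — is a remote flush request failing fast because a flush started by MemStoreFlusher already holds the region, and the coordinator simply re-dispatching it until it succeeds. A minimal sketch of that shape follows; `RegionFlushService`, `FlushOutcome` and the pause interval are hypothetical stand-ins for this sketch, not HBase API.

```java
import java.io.IOException;
import java.util.concurrent.TimeUnit;

/** Hypothetical sketch: a remote flush request that fails fast while another
 *  flush holds the region, and is re-dispatched by its coordinator until done. */
public class FlushRetrySketch {

    /** Outcome of asking a region to flush (stand-in for the internal flush result). */
    enum FlushOutcome { FLUSHED, ALREADY_FLUSHING }

    interface RegionFlushService {
        FlushOutcome requestFlush(String encodedRegionName);
    }

    /** Mirrors the log's behaviour: if the region is already flushing, the callable
     *  does not wait; it reports failure so the coordinator retries later. */
    static void runFlushProcedure(RegionFlushService service, String region) throws IOException {
        FlushOutcome outcome = service.requestFlush(region);
        if (outcome != FlushOutcome.FLUSHED) {
            // Matches "Unable to complete flush {ENCODED => ...}" in the log.
            throw new IOException("Unable to complete flush, region=" + region);
        }
    }

    /** Coordinator side: keep re-dispatching until the subprocedure succeeds. */
    static void dispatchUntilDone(RegionFlushService service, String region) throws InterruptedException {
        while (true) {
            try {
                runFlushProcedure(service, region);
                return; // "Successfully complete execution of pid=..." in the log.
            } catch (IOException retryable) {
                // "Remote procedure failed, pid=..." -- back off briefly and retry.
                TimeUnit.MILLISECONDS.sleep(150);
            }
        }
    }

    public static void main(String[] args) throws InterruptedException {
        // Toy service: the first two attempts collide with an ongoing flush.
        final int[] attempts = {0};
        RegionFlushService toy = region ->
            ++attempts[0] < 3 ? FlushOutcome.ALREADY_FLUSHING : FlushOutcome.FLUSHED;
        dispatchUntilDone(toy, "6a931c8e80842c8947954ecd8357e9ad");
        System.out.println("flushed after " + attempts[0] + " attempts");
    }
}
```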
2024-11-26T10:33:33,526 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/4cbd40fe6a374e3f93d0cde2f3098060 is 50, key is test_row_0/C:col10/1732617212066/Put/seqid=0 2024-11-26T10:33:33,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741957_1133 (size=12301) 2024-11-26T10:33:33,532 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=555 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/4cbd40fe6a374e3f93d0cde2f3098060 2024-11-26T10:33:33,538 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/701fb88b34a545099f03d1c3fcae3477 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/701fb88b34a545099f03d1c3fcae3477 2024-11-26T10:33:33,543 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/701fb88b34a545099f03d1c3fcae3477, entries=200, sequenceid=555, filesize=14.4 K 2024-11-26T10:33:33,544 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/0290077a6659470faf6591fdff5edd9f as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/0290077a6659470faf6591fdff5edd9f 2024-11-26T10:33:33,550 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/0290077a6659470faf6591fdff5edd9f, entries=150, sequenceid=555, filesize=12.0 K 2024-11-26T10:33:33,551 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/4cbd40fe6a374e3f93d0cde2f3098060 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/4cbd40fe6a374e3f93d0cde2f3098060 2024-11-26T10:33:33,559 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/4cbd40fe6a374e3f93d0cde2f3098060, entries=150, sequenceid=555, filesize=12.0 K 2024-11-26T10:33:33,559 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(992): StoreScanner already has the close lock. 
There is no need to updateReaders 2024-11-26T10:33:33,560 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 6a931c8e80842c8947954ecd8357e9ad in 871ms, sequenceid=555, compaction requested=false 2024-11-26T10:33:33,560 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:33,670 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:33,670 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=35 2024-11-26T10:33:33,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:33,671 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2837): Flushing 6a931c8e80842c8947954ecd8357e9ad 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-26T10:33:33,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=A 2024-11-26T10:33:33,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:33,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=B 2024-11-26T10:33:33,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:33,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=C 2024-11-26T10:33:33,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:33,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/659e09ccd8104b9d9e5146679772f17b is 50, key is test_row_0/A:col10/1732617212692/Put/seqid=0 2024-11-26T10:33:33,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741958_1134 (size=9857) 2024-11-26T10:33:33,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-26T10:33:33,811 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 
as already flushing 2024-11-26T10:33:33,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 6a931c8e80842c8947954ecd8357e9ad 2024-11-26T10:33:33,843 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:33,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 257 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617273840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:33,844 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:33,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 256 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617273840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:33,945 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:33,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 259 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617273945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:33,946 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:33,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 258 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617273945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:34,081 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=565 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/659e09ccd8104b9d9e5146679772f17b 2024-11-26T10:33:34,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/607e256a1e444ad5a8e6ab5a9a52d68e is 50, key is test_row_0/B:col10/1732617212692/Put/seqid=0 2024-11-26T10:33:34,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741959_1135 (size=9857) 2024-11-26T10:33:34,148 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:34,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 260 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617274146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:34,148 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:34,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 261 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617274147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:34,155 DEBUG [Thread-164 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1d2a8e08 to 127.0.0.1:61934 2024-11-26T10:33:34,155 DEBUG [Thread-162 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x70267494 to 127.0.0.1:61934 2024-11-26T10:33:34,155 DEBUG [Thread-166 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2c915d17 to 127.0.0.1:61934 2024-11-26T10:33:34,155 DEBUG [Thread-166 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:33:34,155 DEBUG [Thread-162 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:33:34,155 DEBUG [Thread-164 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:33:34,156 DEBUG [Thread-160 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x295cb1ac to 127.0.0.1:61934 2024-11-26T10:33:34,156 DEBUG [Thread-160 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:33:34,343 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-26T10:33:34,451 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:34,451 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:34,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 262 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33206 deadline: 1732617274450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:34,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 263 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33222 deadline: 1732617274450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:34,501 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=565 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/607e256a1e444ad5a8e6ab5a9a52d68e 2024-11-26T10:33:34,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/724bab43217749b28e265e26166fcc9c is 50, key is test_row_0/C:col10/1732617212692/Put/seqid=0 2024-11-26T10:33:34,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741960_1136 (size=9857) 2024-11-26T10:33:34,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-26T10:33:34,729 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:34,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33248 deadline: 1732617274729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:34,730 DEBUG [Thread-153 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8145 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad., hostname=ccf62758a0a5,45419,1732617185877, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-26T10:33:34,918 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=565 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/724bab43217749b28e265e26166fcc9c 2024-11-26T10:33:34,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/659e09ccd8104b9d9e5146679772f17b as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/659e09ccd8104b9d9e5146679772f17b 2024-11-26T10:33:34,932 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/659e09ccd8104b9d9e5146679772f17b, entries=100, sequenceid=565, filesize=9.6 K 2024-11-26T10:33:34,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/607e256a1e444ad5a8e6ab5a9a52d68e as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/607e256a1e444ad5a8e6ab5a9a52d68e 2024-11-26T10:33:34,937 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/607e256a1e444ad5a8e6ab5a9a52d68e, entries=100, sequenceid=565, filesize=9.6 K 2024-11-26T10:33:34,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/724bab43217749b28e265e26166fcc9c as 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/724bab43217749b28e265e26166fcc9c 2024-11-26T10:33:34,943 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/724bab43217749b28e265e26166fcc9c, entries=100, sequenceid=565, filesize=9.6 K 2024-11-26T10:33:34,943 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=161.02 KB/164880 for 6a931c8e80842c8947954ecd8357e9ad in 1272ms, sequenceid=565, compaction requested=true 2024-11-26T10:33:34,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2538): Flush status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:34,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:34,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=35 2024-11-26T10:33:34,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4106): Remote procedure done, pid=35 2024-11-26T10:33:34,946 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=35, resume processing ppid=34 2024-11-26T10:33:34,946 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, ppid=34, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3430 sec 2024-11-26T10:33:34,947 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=34, table=TestAcidGuarantees in 2.3460 sec 2024-11-26T10:33:34,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 6a931c8e80842c8947954ecd8357e9ad 2024-11-26T10:33:34,957 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6a931c8e80842c8947954ecd8357e9ad 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-26T10:33:34,957 DEBUG [Thread-151 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x22cb07dd to 127.0.0.1:61934 2024-11-26T10:33:34,957 DEBUG [Thread-151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:33:34,957 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=A 2024-11-26T10:33:34,957 DEBUG [Thread-157 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x38766d64 to 127.0.0.1:61934 2024-11-26T10:33:34,957 DEBUG [Thread-157 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:33:34,957 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:34,957 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=B 
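The "Over memstore limit=512.0 K" warnings paired with the client-side "Call exception, tries=7, retries=16, started=8145 ms ago" trace show the server rejecting writes while flushes catch up, and the test's writer thread retrying the put with growing pauses. The sketch below shows that general shape; the pause schedule and helper names are illustrative assumptions for this sketch, not the exact HBase retry policy.

```java
import java.io.IOException;
import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;

/** Hypothetical sketch of a bounded retry loop with growing pauses, the shape
 *  a client falls into when the server answers RegionTooBusyException. */
public class RetryWithBackoffSketch {

    /** Stand-in for a retriable server-side rejection such as RegionTooBusyException. */
    static class RegionTooBusy extends IOException {
        RegionTooBusy(String msg) { super(msg); }
    }

    /** Retry up to maxRetries times, pausing a little longer after each failure. */
    static <T> T callWithRetries(Callable<T> call, int maxRetries, long basePauseMs)
            throws Exception {
        Exception last = null;
        for (int tries = 0; tries <= maxRetries; tries++) {
            try {
                return call.call();
            } catch (RegionTooBusy busy) {
                last = busy;
                // Illustrative pause growth only; the real client uses its own backoff table.
                long pause = basePauseMs * Math.min(1L << Math.min(tries, 6), 100L);
                // Matches the log's "tries=..., retries=..., started=... ms ago" bookkeeping.
                System.out.printf("tries=%d, retries=%d, pausing %d ms: %s%n",
                        tries + 1, maxRetries, pause, busy.getMessage());
                TimeUnit.MILLISECONDS.sleep(pause);
            }
        }
        throw last;
    }

    public static void main(String[] args) throws Exception {
        final int[] rejections = {3}; // toy server: reject the first three puts
        String result = callWithRetries(() -> {
            if (rejections[0]-- > 0) {
                throw new RegionTooBusy("Over memstore limit=512.0 K");
            }
            return "put accepted";
        }, 16, 100);
        System.out.println(result);
    }
}
```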
2024-11-26T10:33:34,958 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:34,958 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=C 2024-11-26T10:33:34,958 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:34,963 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/fba3d20487b24ff48d940288412d2d72 is 50, key is test_row_0/A:col10/1732617214955/Put/seqid=0 2024-11-26T10:33:34,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741961_1137 (size=12301) 2024-11-26T10:33:35,369 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=594 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/fba3d20487b24ff48d940288412d2d72 2024-11-26T10:33:35,381 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/15909b54092e40e6b2a764a9025108ca is 50, key is test_row_0/B:col10/1732617214955/Put/seqid=0 2024-11-26T10:33:35,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741962_1138 (size=12301) 2024-11-26T10:33:35,787 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=594 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/15909b54092e40e6b2a764a9025108ca 2024-11-26T10:33:35,801 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/c6aad5c760164f7ebd9262bcc5e73afa is 50, key is test_row_0/C:col10/1732617214955/Put/seqid=0 2024-11-26T10:33:35,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741963_1139 (size=12301) 2024-11-26T10:33:36,208 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=594 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/c6aad5c760164f7ebd9262bcc5e73afa 2024-11-26T10:33:36,220 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/fba3d20487b24ff48d940288412d2d72 as 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/fba3d20487b24ff48d940288412d2d72 2024-11-26T10:33:36,225 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/fba3d20487b24ff48d940288412d2d72, entries=150, sequenceid=594, filesize=12.0 K 2024-11-26T10:33:36,226 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/15909b54092e40e6b2a764a9025108ca as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/15909b54092e40e6b2a764a9025108ca 2024-11-26T10:33:36,231 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/15909b54092e40e6b2a764a9025108ca, entries=150, sequenceid=594, filesize=12.0 K 2024-11-26T10:33:36,233 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/c6aad5c760164f7ebd9262bcc5e73afa as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/c6aad5c760164f7ebd9262bcc5e73afa 2024-11-26T10:33:36,238 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/c6aad5c760164f7ebd9262bcc5e73afa, entries=150, sequenceid=594, filesize=12.0 K 2024-11-26T10:33:36,239 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=0 B/0 for 6a931c8e80842c8947954ecd8357e9ad in 1283ms, sequenceid=594, compaction requested=true 2024-11-26T10:33:36,239 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:36,240 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a931c8e80842c8947954ecd8357e9ad:A, priority=-2147483648, current under compaction store size is 1 2024-11-26T10:33:36,240 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:33:36,240 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-26T10:33:36,240 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a931c8e80842c8947954ecd8357e9ad:B, priority=-2147483648, current under compaction store size is 2 2024-11-26T10:33:36,240 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:33:36,240 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-26T10:33:36,240 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a931c8e80842c8947954ecd8357e9ad:C, priority=-2147483648, current under compaction store size is 3 2024-11-26T10:33:36,240 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:33:36,241 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50494 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-26T10:33:36,241 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48054 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-26T10:33:36,241 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): 6a931c8e80842c8947954ecd8357e9ad/A is initiating minor compaction (all files) 2024-11-26T10:33:36,241 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): 6a931c8e80842c8947954ecd8357e9ad/B is initiating minor compaction (all files) 2024-11-26T10:33:36,242 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a931c8e80842c8947954ecd8357e9ad/A in TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:36,242 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a931c8e80842c8947954ecd8357e9ad/B in TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 
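The "Exploring compaction algorithm has selected 4 files of size ... with 3 in ratio" lines come from a size-ratio test over candidate windows of store files: a window is acceptable when no single file is much larger than the combined size of the others, so compacting it is worthwhile. A simplified illustration of that ratio check follows; the constants and method names are assumptions for this sketch, not the actual ExploringCompactionPolicy code.

```java
import java.util.ArrayList;
import java.util.List;

/** Simplified illustration of size-ratio compaction selection: pick the largest
 *  contiguous window of store-file sizes in which every file is at most
 *  `ratio` times the sum of the other files in the window. */
public class RatioCompactionSketch {

    static boolean windowInRatio(List<Long> sizes, int from, int to, double ratio) {
        long total = 0;
        for (int i = from; i < to; i++) {
            total += sizes.get(i);
        }
        for (int i = from; i < to; i++) {
            long others = total - sizes.get(i);
            if (sizes.get(i) > ratio * others) {
                return false; // one file dwarfs the rest; skip this window
            }
        }
        return true;
    }

    /** Return the best (largest) in-ratio window, mimicking "selected N files of size S". */
    static List<Long> select(List<Long> sizes, int minFiles, int maxFiles, double ratio) {
        List<Long> best = new ArrayList<>();
        for (int from = 0; from < sizes.size(); from++) {
            for (int to = from + minFiles; to <= Math.min(sizes.size(), from + maxFiles); to++) {
                if (windowInRatio(sizes, from, to, ratio) && (to - from) > best.size()) {
                    best = new ArrayList<>(sizes.subList(from, to));
                }
            }
        }
        return best;
    }

    public static void main(String[] args) {
        // File sizes roughly matching the log's A-store candidates (13.3K, 14.4K, 9.6K, 12.0K).
        List<Long> sizes = List.of(13_600L, 14_700L, 9_857L, 12_301L);
        List<Long> picked = select(sizes, 3, 10, 1.2);
        long total = picked.stream().mapToLong(Long::longValue).sum();
        System.out.println("selected " + picked.size() + " files of size " + total);
    }
}
```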
2024-11-26T10:33:36,242 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/db910c7f3cae44d3aa34c6b5aa74851f, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/0290077a6659470faf6591fdff5edd9f, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/607e256a1e444ad5a8e6ab5a9a52d68e, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/15909b54092e40e6b2a764a9025108ca] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp, totalSize=46.9 K 2024-11-26T10:33:36,242 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/58e1295a303946bc8e77ff608fa33a85, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/701fb88b34a545099f03d1c3fcae3477, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/659e09ccd8104b9d9e5146679772f17b, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/fba3d20487b24ff48d940288412d2d72] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp, totalSize=49.3 K 2024-11-26T10:33:36,242 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting db910c7f3cae44d3aa34c6b5aa74851f, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=526, earliestPutTs=1732617212034 2024-11-26T10:33:36,242 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 58e1295a303946bc8e77ff608fa33a85, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=526, earliestPutTs=1732617212034 2024-11-26T10:33:36,243 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 0290077a6659470faf6591fdff5edd9f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=555, earliestPutTs=1732617212066 2024-11-26T10:33:36,243 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 701fb88b34a545099f03d1c3fcae3477, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=555, earliestPutTs=1732617212066 2024-11-26T10:33:36,243 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 659e09ccd8104b9d9e5146679772f17b, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=565, earliestPutTs=1732617212692 2024-11-26T10:33:36,243 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 
607e256a1e444ad5a8e6ab5a9a52d68e, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=565, earliestPutTs=1732617212692 2024-11-26T10:33:36,243 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting fba3d20487b24ff48d940288412d2d72, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=594, earliestPutTs=1732617213839 2024-11-26T10:33:36,243 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 15909b54092e40e6b2a764a9025108ca, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=594, earliestPutTs=1732617213839 2024-11-26T10:33:36,253 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a931c8e80842c8947954ecd8357e9ad#A#compaction#125 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:33:36,253 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a931c8e80842c8947954ecd8357e9ad#B#compaction#126 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:33:36,254 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/54178a599846437ba70e2c45b326ded5 is 50, key is test_row_0/A:col10/1732617214955/Put/seqid=0 2024-11-26T10:33:36,254 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/20af0c1aa8544bcb81d07f7c9153b71d is 50, key is test_row_0/B:col10/1732617214955/Put/seqid=0 2024-11-26T10:33:36,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741965_1141 (size=13731) 2024-11-26T10:33:36,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741964_1140 (size=13731) 2024-11-26T10:33:36,393 DEBUG [Thread-155 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5400112e to 127.0.0.1:61934 2024-11-26T10:33:36,393 DEBUG [Thread-155 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:33:36,403 DEBUG [Thread-149 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6f343a4d to 127.0.0.1:61934 2024-11-26T10:33:36,403 DEBUG [Thread-149 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:33:36,669 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/54178a599846437ba70e2c45b326ded5 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/54178a599846437ba70e2c45b326ded5 2024-11-26T10:33:36,669 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/20af0c1aa8544bcb81d07f7c9153b71d as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/20af0c1aa8544bcb81d07f7c9153b71d 2024-11-26T10:33:36,675 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6a931c8e80842c8947954ecd8357e9ad/A of 6a931c8e80842c8947954ecd8357e9ad into 54178a599846437ba70e2c45b326ded5(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:33:36,675 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6a931c8e80842c8947954ecd8357e9ad/B of 6a931c8e80842c8947954ecd8357e9ad into 20af0c1aa8544bcb81d07f7c9153b71d(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:33:36,675 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:36,675 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:36,675 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad., storeName=6a931c8e80842c8947954ecd8357e9ad/A, priority=12, startTime=1732617216240; duration=0sec 2024-11-26T10:33:36,675 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad., storeName=6a931c8e80842c8947954ecd8357e9ad/B, priority=12, startTime=1732617216240; duration=0sec 2024-11-26T10:33:36,675 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:33:36,675 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a931c8e80842c8947954ecd8357e9ad:A 2024-11-26T10:33:36,675 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:33:36,675 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-26T10:33:36,675 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a931c8e80842c8947954ecd8357e9ad:B 2024-11-26T10:33:36,676 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48020 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-26T10:33:36,676 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): 6a931c8e80842c8947954ecd8357e9ad/C is initiating minor compaction (all files) 
2024-11-26T10:33:36,676 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6a931c8e80842c8947954ecd8357e9ad/C in TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:36,677 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/fcbe5f18193741448fc1a22c146d5df3, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/4cbd40fe6a374e3f93d0cde2f3098060, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/724bab43217749b28e265e26166fcc9c, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/c6aad5c760164f7ebd9262bcc5e73afa] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp, totalSize=46.9 K 2024-11-26T10:33:36,677 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting fcbe5f18193741448fc1a22c146d5df3, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=526, earliestPutTs=1732617212034 2024-11-26T10:33:36,677 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4cbd40fe6a374e3f93d0cde2f3098060, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=555, earliestPutTs=1732617212066 2024-11-26T10:33:36,678 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 724bab43217749b28e265e26166fcc9c, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=565, earliestPutTs=1732617212692 2024-11-26T10:33:36,678 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting c6aad5c760164f7ebd9262bcc5e73afa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=594, earliestPutTs=1732617213839 2024-11-26T10:33:36,686 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a931c8e80842c8947954ecd8357e9ad#C#compaction#127 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:33:36,687 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/af362d86d9f74a009bbd0a0a1f0d1133 is 50, key is test_row_0/C:col10/1732617214955/Put/seqid=0 2024-11-26T10:33:36,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741966_1142 (size=13697) 2024-11-26T10:33:36,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-26T10:33:36,708 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 34 completed 2024-11-26T10:33:37,101 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/af362d86d9f74a009bbd0a0a1f0d1133 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/af362d86d9f74a009bbd0a0a1f0d1133 2024-11-26T10:33:37,110 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6a931c8e80842c8947954ecd8357e9ad/C of 6a931c8e80842c8947954ecd8357e9ad into af362d86d9f74a009bbd0a0a1f0d1133(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:33:37,110 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:37,111 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad., storeName=6a931c8e80842c8947954ecd8357e9ad/C, priority=12, startTime=1732617216240; duration=0sec 2024-11-26T10:33:37,111 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:33:37,111 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a931c8e80842c8947954ecd8357e9ad:C 2024-11-26T10:33:41,568 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-26T10:33:41,571 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43070, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-26T10:33:44,796 DEBUG [Thread-153 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x478bae6b to 127.0.0.1:61934 2024-11-26T10:33:44,796 DEBUG [Thread-153 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:33:44,796 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-26T10:33:44,796 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 81 2024-11-26T10:33:44,796 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 118 2024-11-26T10:33:44,796 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 73 2024-11-26T10:33:44,796 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 80 2024-11-26T10:33:44,796 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 119 2024-11-26T10:33:44,796 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-26T10:33:44,796 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6880 2024-11-26T10:33:44,796 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6714 2024-11-26T10:33:44,796 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-26T10:33:44,797 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3002 2024-11-26T10:33:44,797 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9004 rows 2024-11-26T10:33:44,797 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2993 2024-11-26T10:33:44,797 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8977 rows 2024-11-26T10:33:44,797 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-26T10:33:44,797 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1f6e36fe to 127.0.0.1:61934 2024-11-26T10:33:44,797 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:33:44,801 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-26T10:33:44,806 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-26T10:33:44,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=36, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-26T10:33:44,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-26T10:33:44,813 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732617224812"}]},"ts":"1732617224812"} 2024-11-26T10:33:44,814 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-26T10:33:44,850 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-26T10:33:44,852 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=37, ppid=36, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-26T10:33:44,858 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=38, ppid=37, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=6a931c8e80842c8947954ecd8357e9ad, UNASSIGN}] 2024-11-26T10:33:44,859 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=38, ppid=37, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure 
table=TestAcidGuarantees, region=6a931c8e80842c8947954ecd8357e9ad, UNASSIGN 2024-11-26T10:33:44,860 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=38 updating hbase:meta row=6a931c8e80842c8947954ecd8357e9ad, regionState=CLOSING, regionLocation=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:44,862 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-26T10:33:44,862 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE; CloseRegionProcedure 6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877}] 2024-11-26T10:33:44,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-26T10:33:45,020 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:45,023 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] handler.UnassignRegionHandler(124): Close 6a931c8e80842c8947954ecd8357e9ad 2024-11-26T10:33:45,023 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-26T10:33:45,024 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1681): Closing 6a931c8e80842c8947954ecd8357e9ad, disabling compactions & flushes 2024-11-26T10:33:45,025 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:45,025 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 2024-11-26T10:33:45,025 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. after waiting 0 ms 2024-11-26T10:33:45,025 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 
2024-11-26T10:33:45,025 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(2837): Flushing 6a931c8e80842c8947954ecd8357e9ad 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-11-26T10:33:45,026 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=A 2024-11-26T10:33:45,026 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:45,026 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=B 2024-11-26T10:33:45,026 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:45,026 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6a931c8e80842c8947954ecd8357e9ad, store=C 2024-11-26T10:33:45,026 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:45,033 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/223373147862429c9eca07260443d362 is 50, key is test_row_0/A:col10/1732617216401/Put/seqid=0 2024-11-26T10:33:45,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741967_1143 (size=9857) 2024-11-26T10:33:45,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-26T10:33:45,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-26T10:33:45,438 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=603 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/223373147862429c9eca07260443d362 2024-11-26T10:33:45,447 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/9bd0212936744afbb1eef4588f55a74e is 50, key is test_row_0/B:col10/1732617216401/Put/seqid=0 2024-11-26T10:33:45,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741968_1144 (size=9857) 2024-11-26T10:33:45,853 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 
{event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=603 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/9bd0212936744afbb1eef4588f55a74e 2024-11-26T10:33:45,871 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/fe88e3207b9d4153a5b47e086cfa606a is 50, key is test_row_0/C:col10/1732617216401/Put/seqid=0 2024-11-26T10:33:45,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741969_1145 (size=9857) 2024-11-26T10:33:45,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-26T10:33:46,277 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=603 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/fe88e3207b9d4153a5b47e086cfa606a 2024-11-26T10:33:46,288 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/A/223373147862429c9eca07260443d362 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/223373147862429c9eca07260443d362 2024-11-26T10:33:46,292 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/223373147862429c9eca07260443d362, entries=100, sequenceid=603, filesize=9.6 K 2024-11-26T10:33:46,293 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/B/9bd0212936744afbb1eef4588f55a74e as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/9bd0212936744afbb1eef4588f55a74e 2024-11-26T10:33:46,297 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/9bd0212936744afbb1eef4588f55a74e, entries=100, sequenceid=603, filesize=9.6 K 2024-11-26T10:33:46,298 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/.tmp/C/fe88e3207b9d4153a5b47e086cfa606a as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/fe88e3207b9d4153a5b47e086cfa606a 2024-11-26T10:33:46,302 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/fe88e3207b9d4153a5b47e086cfa606a, entries=100, sequenceid=603, filesize=9.6 K 2024-11-26T10:33:46,303 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=0 B/0 for 6a931c8e80842c8947954ecd8357e9ad in 1278ms, sequenceid=603, compaction requested=false 2024-11-26T10:33:46,304 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/57d5077aa3ab4572bd1036d9e6ae3016, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/8be99741b59e4581a589a6f6632be054, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/70ad99ed8b554192bbe90cf5558280d3, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/2ff3cb97124c4d0d8101e3bee5de081f, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/5da30acb7eba43a9aa588fa08641de0d, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/46562d5f369a4a48a966a4bc6df5a179, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/565980643b87436aabc58c9e36403499, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/6ac301cb95de4d4ab6358e9f760a69e9, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/0369380f4cd14e769c15403052a6e651, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/d531626397d9487d9c107443fb560921, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/567247a7157949dd9fddc725ab217955, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/7c7ffa2db9ad46be9618a74fe33f8864, 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/46f771c0db9546148b03c9a42c1999d2, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/a0ef14091e624b8c8192066cc9c8f2a9, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/c5c6a89542a6482c88a60625fe465903, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/1d767e67856147f382e034042d14c2ce, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/c4ada2985a7146f08d482ccc41b7e113, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/e5a4029ac9014bd0ae93a97e97dc9ad6, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/ed35e0ca9998430caa077c85dc9169c8, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/8f92facc5d574cf9923499bb93f1d85c, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/50a6f2a001084d0490759da45e3d01b4, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/74510dfefcfa406daa9f828c18213890, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/77df0c371bc14ca89ed97e622f6b1f7d, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/997ce537c8434277934b02e82512a699, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/b9ca1a9c7df4417598ae70c15aa8b63f, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/f5756f155680495b90888db72c264a32, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/efc5583d048540a5b0f27ca42ece6c67, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/895142e2f0d3499c9f5e77456e999b15, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/3373fd420b3a46a99ed212d476ea7a74, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/858ffa5dd2af4655b52e75d165b10e75, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/00d528e38e2643db8651344842949d80, 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/6ac35095e42d4a30b83efc8a5bdc2360, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/8ab2b2b3e9da4efbb978ba131a4649e2, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/30669ddb4045465689a655502cd1ce02, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/81109afc88ab4a9184f8702d5424f6ee, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/3bfe3cb996254638adcbff9191a44c26, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/704294cab77944558f8ab0b1ca519e66, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/ae1b9b5112ba4807ab9fd59039cad2d4, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/58e1295a303946bc8e77ff608fa33a85, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/701fb88b34a545099f03d1c3fcae3477, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/659e09ccd8104b9d9e5146679772f17b, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/fba3d20487b24ff48d940288412d2d72] to archive 2024-11-26T10:33:46,307 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-26T10:33:46,312 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/57d5077aa3ab4572bd1036d9e6ae3016 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/57d5077aa3ab4572bd1036d9e6ae3016 2024-11-26T10:33:46,313 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/8be99741b59e4581a589a6f6632be054 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/8be99741b59e4581a589a6f6632be054 2024-11-26T10:33:46,314 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/70ad99ed8b554192bbe90cf5558280d3 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/70ad99ed8b554192bbe90cf5558280d3 2024-11-26T10:33:46,315 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/2ff3cb97124c4d0d8101e3bee5de081f to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/2ff3cb97124c4d0d8101e3bee5de081f 2024-11-26T10:33:46,316 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/5da30acb7eba43a9aa588fa08641de0d to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/5da30acb7eba43a9aa588fa08641de0d 2024-11-26T10:33:46,318 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/46562d5f369a4a48a966a4bc6df5a179 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/46562d5f369a4a48a966a4bc6df5a179 2024-11-26T10:33:46,319 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/565980643b87436aabc58c9e36403499 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/565980643b87436aabc58c9e36403499 2024-11-26T10:33:46,320 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/6ac301cb95de4d4ab6358e9f760a69e9 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/6ac301cb95de4d4ab6358e9f760a69e9 2024-11-26T10:33:46,321 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/0369380f4cd14e769c15403052a6e651 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/0369380f4cd14e769c15403052a6e651 2024-11-26T10:33:46,323 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/d531626397d9487d9c107443fb560921 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/d531626397d9487d9c107443fb560921 2024-11-26T10:33:46,324 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/567247a7157949dd9fddc725ab217955 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/567247a7157949dd9fddc725ab217955 2024-11-26T10:33:46,325 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/7c7ffa2db9ad46be9618a74fe33f8864 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/7c7ffa2db9ad46be9618a74fe33f8864 2024-11-26T10:33:46,326 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/46f771c0db9546148b03c9a42c1999d2 to 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/46f771c0db9546148b03c9a42c1999d2 2024-11-26T10:33:46,327 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/a0ef14091e624b8c8192066cc9c8f2a9 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/a0ef14091e624b8c8192066cc9c8f2a9 2024-11-26T10:33:46,329 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/c5c6a89542a6482c88a60625fe465903 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/c5c6a89542a6482c88a60625fe465903 2024-11-26T10:33:46,330 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/1d767e67856147f382e034042d14c2ce to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/1d767e67856147f382e034042d14c2ce 2024-11-26T10:33:46,331 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/c4ada2985a7146f08d482ccc41b7e113 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/c4ada2985a7146f08d482ccc41b7e113 2024-11-26T10:33:46,332 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/e5a4029ac9014bd0ae93a97e97dc9ad6 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/e5a4029ac9014bd0ae93a97e97dc9ad6 2024-11-26T10:33:46,333 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/ed35e0ca9998430caa077c85dc9169c8 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/ed35e0ca9998430caa077c85dc9169c8 2024-11-26T10:33:46,335 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/8f92facc5d574cf9923499bb93f1d85c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/8f92facc5d574cf9923499bb93f1d85c 2024-11-26T10:33:46,336 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/50a6f2a001084d0490759da45e3d01b4 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/50a6f2a001084d0490759da45e3d01b4 2024-11-26T10:33:46,337 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/74510dfefcfa406daa9f828c18213890 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/74510dfefcfa406daa9f828c18213890 2024-11-26T10:33:46,338 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/77df0c371bc14ca89ed97e622f6b1f7d to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/77df0c371bc14ca89ed97e622f6b1f7d 2024-11-26T10:33:46,339 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/997ce537c8434277934b02e82512a699 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/997ce537c8434277934b02e82512a699 2024-11-26T10:33:46,340 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/b9ca1a9c7df4417598ae70c15aa8b63f to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/b9ca1a9c7df4417598ae70c15aa8b63f 2024-11-26T10:33:46,341 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/f5756f155680495b90888db72c264a32 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/f5756f155680495b90888db72c264a32 2024-11-26T10:33:46,342 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/efc5583d048540a5b0f27ca42ece6c67 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/efc5583d048540a5b0f27ca42ece6c67 2024-11-26T10:33:46,344 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/895142e2f0d3499c9f5e77456e999b15 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/895142e2f0d3499c9f5e77456e999b15 2024-11-26T10:33:46,345 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/3373fd420b3a46a99ed212d476ea7a74 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/3373fd420b3a46a99ed212d476ea7a74 2024-11-26T10:33:46,346 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/858ffa5dd2af4655b52e75d165b10e75 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/858ffa5dd2af4655b52e75d165b10e75 2024-11-26T10:33:46,347 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/00d528e38e2643db8651344842949d80 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/00d528e38e2643db8651344842949d80 2024-11-26T10:33:46,348 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/6ac35095e42d4a30b83efc8a5bdc2360 to 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/6ac35095e42d4a30b83efc8a5bdc2360 2024-11-26T10:33:46,349 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/8ab2b2b3e9da4efbb978ba131a4649e2 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/8ab2b2b3e9da4efbb978ba131a4649e2 2024-11-26T10:33:46,350 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/30669ddb4045465689a655502cd1ce02 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/30669ddb4045465689a655502cd1ce02 2024-11-26T10:33:46,351 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/81109afc88ab4a9184f8702d5424f6ee to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/81109afc88ab4a9184f8702d5424f6ee 2024-11-26T10:33:46,352 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/3bfe3cb996254638adcbff9191a44c26 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/3bfe3cb996254638adcbff9191a44c26 2024-11-26T10:33:46,353 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/704294cab77944558f8ab0b1ca519e66 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/704294cab77944558f8ab0b1ca519e66 2024-11-26T10:33:46,354 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/ae1b9b5112ba4807ab9fd59039cad2d4 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/ae1b9b5112ba4807ab9fd59039cad2d4 2024-11-26T10:33:46,355 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/58e1295a303946bc8e77ff608fa33a85 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/58e1295a303946bc8e77ff608fa33a85 2024-11-26T10:33:46,356 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/701fb88b34a545099f03d1c3fcae3477 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/701fb88b34a545099f03d1c3fcae3477 2024-11-26T10:33:46,357 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/659e09ccd8104b9d9e5146679772f17b to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/659e09ccd8104b9d9e5146679772f17b 2024-11-26T10:33:46,358 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/fba3d20487b24ff48d940288412d2d72 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/fba3d20487b24ff48d940288412d2d72 2024-11-26T10:33:46,371 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/b10d86ffa79e4c53a6b807ac43c6b78c, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/0c9ce492bfd34091806038446f5d10a0, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/11b06e87694b4c46a8ef4ac327afd461, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/a3e0935f3b44466c9f28d890e0f5adfb, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/b5fecc90d3db44dea7c24b94338482c9, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/3dcbf60bd3c54ca3af25e8d89519ee59, 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/c43165e599014bbeba7d8a1758ee1b74, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/2b626ef2efba41439ab483dd9b543ba9, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/7276c8ccd07a417694c9d7a4342b910c, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/17a20d9859f04082b4547325515ac69d, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/9368a3e55623410ea237fbc1f479a08d, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/44413a28a80a43b589c7ef6822d5cc05, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/708018327f13437aa2935e390b795ebb, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/1feeae9316774cad9b1afddb2d844685, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/9f54751241c84af8b9c55cabe1217ccc, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/8be353fc75bb4982ae31b8da8e4f2b17, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/01cec3fba0634940b16d6058d92a8277, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/e02ba23c08df4311bcd363ed228ae7e6, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/947713f927f247e59b8f77913580cf52, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/80294c9ff5d74b2da078050a35c10efb, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/37d7bbcb7f894d5f9400a2435ffab0cd, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/348ceb4cf81349dea56c6a5536c32cf3, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/1169269d26d84103b412eae7ff346eaa, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/3bb122fc1a8c4941983ee2a22b11d164, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/f6487cefa3564f0bb71edda2cdee708d, 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/58f9b86484644fcbac52a152bf2856c9, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/17d44034876e4bf6a17e7583b7a95af5, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/234c4ad051cf4774a450c70fd891f1db, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/fb2da11529df4b81b0d6daaa70795398, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/463de66ed9d64cd5a66470e442e80541, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/f3e8b446fde8401188984e8dabbc20a8, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/f8f4290f6bd849758c52b2e5cbd458f0, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/ffafd61ae6634365bcc350fbd90187cc, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/18392aab8ccd40839d41bc2529e84618, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/7772fca187294aa8ba22d41b257549eb, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/626905ace789471a91581afb0682b31e, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/6cad3177f9d341fdb1db0f74dd4c60f5, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/db910c7f3cae44d3aa34c6b5aa74851f, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/828fca2c8c60452ab050320e35723a82, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/0290077a6659470faf6591fdff5edd9f, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/607e256a1e444ad5a8e6ab5a9a52d68e, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/15909b54092e40e6b2a764a9025108ca] to archive 2024-11-26T10:33:46,372 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-26T10:33:46,374 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/b10d86ffa79e4c53a6b807ac43c6b78c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/b10d86ffa79e4c53a6b807ac43c6b78c 2024-11-26T10:33:46,375 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/0c9ce492bfd34091806038446f5d10a0 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/0c9ce492bfd34091806038446f5d10a0 2024-11-26T10:33:46,377 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/11b06e87694b4c46a8ef4ac327afd461 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/11b06e87694b4c46a8ef4ac327afd461 2024-11-26T10:33:46,378 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/a3e0935f3b44466c9f28d890e0f5adfb to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/a3e0935f3b44466c9f28d890e0f5adfb 2024-11-26T10:33:46,379 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/b5fecc90d3db44dea7c24b94338482c9 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/b5fecc90d3db44dea7c24b94338482c9 2024-11-26T10:33:46,380 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/3dcbf60bd3c54ca3af25e8d89519ee59 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/3dcbf60bd3c54ca3af25e8d89519ee59 2024-11-26T10:33:46,381 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/c43165e599014bbeba7d8a1758ee1b74 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/c43165e599014bbeba7d8a1758ee1b74 2024-11-26T10:33:46,382 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/2b626ef2efba41439ab483dd9b543ba9 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/2b626ef2efba41439ab483dd9b543ba9 2024-11-26T10:33:46,383 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/7276c8ccd07a417694c9d7a4342b910c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/7276c8ccd07a417694c9d7a4342b910c 2024-11-26T10:33:46,384 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/17a20d9859f04082b4547325515ac69d to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/17a20d9859f04082b4547325515ac69d 2024-11-26T10:33:46,385 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/9368a3e55623410ea237fbc1f479a08d to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/9368a3e55623410ea237fbc1f479a08d 2024-11-26T10:33:46,386 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/44413a28a80a43b589c7ef6822d5cc05 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/44413a28a80a43b589c7ef6822d5cc05 2024-11-26T10:33:46,387 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/708018327f13437aa2935e390b795ebb to 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/708018327f13437aa2935e390b795ebb 2024-11-26T10:33:46,389 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/1feeae9316774cad9b1afddb2d844685 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/1feeae9316774cad9b1afddb2d844685 2024-11-26T10:33:46,390 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/9f54751241c84af8b9c55cabe1217ccc to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/9f54751241c84af8b9c55cabe1217ccc 2024-11-26T10:33:46,391 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/8be353fc75bb4982ae31b8da8e4f2b17 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/8be353fc75bb4982ae31b8da8e4f2b17 2024-11-26T10:33:46,392 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/01cec3fba0634940b16d6058d92a8277 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/01cec3fba0634940b16d6058d92a8277 2024-11-26T10:33:46,393 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/e02ba23c08df4311bcd363ed228ae7e6 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/e02ba23c08df4311bcd363ed228ae7e6 2024-11-26T10:33:46,394 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/947713f927f247e59b8f77913580cf52 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/947713f927f247e59b8f77913580cf52 2024-11-26T10:33:46,396 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/80294c9ff5d74b2da078050a35c10efb to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/80294c9ff5d74b2da078050a35c10efb 2024-11-26T10:33:46,397 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/37d7bbcb7f894d5f9400a2435ffab0cd to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/37d7bbcb7f894d5f9400a2435ffab0cd 2024-11-26T10:33:46,398 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/348ceb4cf81349dea56c6a5536c32cf3 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/348ceb4cf81349dea56c6a5536c32cf3 2024-11-26T10:33:46,399 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/1169269d26d84103b412eae7ff346eaa to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/1169269d26d84103b412eae7ff346eaa 2024-11-26T10:33:46,400 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/3bb122fc1a8c4941983ee2a22b11d164 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/3bb122fc1a8c4941983ee2a22b11d164 2024-11-26T10:33:46,401 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/f6487cefa3564f0bb71edda2cdee708d to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/f6487cefa3564f0bb71edda2cdee708d 2024-11-26T10:33:46,402 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/58f9b86484644fcbac52a152bf2856c9 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/58f9b86484644fcbac52a152bf2856c9 2024-11-26T10:33:46,403 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/17d44034876e4bf6a17e7583b7a95af5 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/17d44034876e4bf6a17e7583b7a95af5 2024-11-26T10:33:46,404 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/234c4ad051cf4774a450c70fd891f1db to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/234c4ad051cf4774a450c70fd891f1db 2024-11-26T10:33:46,405 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/fb2da11529df4b81b0d6daaa70795398 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/fb2da11529df4b81b0d6daaa70795398 2024-11-26T10:33:46,406 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/463de66ed9d64cd5a66470e442e80541 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/463de66ed9d64cd5a66470e442e80541 2024-11-26T10:33:46,408 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/f3e8b446fde8401188984e8dabbc20a8 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/f3e8b446fde8401188984e8dabbc20a8 2024-11-26T10:33:46,409 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/f8f4290f6bd849758c52b2e5cbd458f0 to 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/f8f4290f6bd849758c52b2e5cbd458f0 2024-11-26T10:33:46,410 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/ffafd61ae6634365bcc350fbd90187cc to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/ffafd61ae6634365bcc350fbd90187cc 2024-11-26T10:33:46,411 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/18392aab8ccd40839d41bc2529e84618 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/18392aab8ccd40839d41bc2529e84618 2024-11-26T10:33:46,412 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/7772fca187294aa8ba22d41b257549eb to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/7772fca187294aa8ba22d41b257549eb 2024-11-26T10:33:46,413 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/626905ace789471a91581afb0682b31e to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/626905ace789471a91581afb0682b31e 2024-11-26T10:33:46,414 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/6cad3177f9d341fdb1db0f74dd4c60f5 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/6cad3177f9d341fdb1db0f74dd4c60f5 2024-11-26T10:33:46,415 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/db910c7f3cae44d3aa34c6b5aa74851f to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/db910c7f3cae44d3aa34c6b5aa74851f 2024-11-26T10:33:46,416 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/828fca2c8c60452ab050320e35723a82 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/828fca2c8c60452ab050320e35723a82 2024-11-26T10:33:46,417 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/0290077a6659470faf6591fdff5edd9f to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/0290077a6659470faf6591fdff5edd9f 2024-11-26T10:33:46,419 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/607e256a1e444ad5a8e6ab5a9a52d68e to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/607e256a1e444ad5a8e6ab5a9a52d68e 2024-11-26T10:33:46,420 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/15909b54092e40e6b2a764a9025108ca to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/15909b54092e40e6b2a764a9025108ca 2024-11-26T10:33:46,422 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/398abb2cc49d48718b6d75448f7cda1d, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/e808e7ecb9e140b495c66e945bf9f70f, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/f3be9e6d81134218a818f3f35e0aff68, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/b8ded2282a824f279bd554ad8b5b596e, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/4f1af6f0d83d4affa902d16a6112a0a9, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/1c0333ef61dc49cb8ce8fa3826c53d88, 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/369e824438d6472b8bd73d75037d37e8, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/42976dfb274049108aa5da1aeb31a7f9, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/e66901f60da3432d84657cf6f1a5ca37, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/a4c067f22df4485a9f25a940e7d5b06a, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/8c5790d18b9a4f79913bca1faa0afde1, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/f52af82ae0c54109ada088bf62ad123f, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/543aedca104e48679af3fd3ad13824cf, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/253e67d32e0e400c947f5ebf79a9fc40, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/bab66df00b63458494b13f6245dbc828, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/dac6b4717845476e817110decb445c14, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/14194599004b4968a5cb09dbfc0b4131, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/de0c65b0449c4c43bea280c0a1bff8b0, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/526147ad62ac4068b33cad0b3992b418, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/04afbd1628684434a808dbd6712ce879, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/393b54b400a748d18a5ed6a8dd7703a6, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/1cdf0569dda24789b272489aae3b46a3, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/8cda4f8edc804cf796faff52b716b987, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/96b00a2aa9384bab83cb3056fe3ed8fe, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/b6924dfb8ade4a229f09e981801ff2c4, 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/e0b57dee62694719a9da03a7fc4059d4, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/d57d32fc88ea4fd7a1d262e978f2e3f6, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/1b76ceca4cf74046a6487002474c20fd, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/290e327985c0494cb04527b2de3f45fb, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/18050d6399b74bd0a59fc07358f7b6b7, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/a0f341cab710448eb964a1b9b9f9e25b, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/5df75b3c7c524d278476a1aebe06869e, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/852eb318e4d74c25919bb5aa2f8e7fce, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/cf1ce271423c49069168fa868877a4f7, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/130adbcb49fe47e4ac3c6f0c44d6b793, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/6ab8a55f8d6048489d0d878eff2d758c, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/fcbe5f18193741448fc1a22c146d5df3, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/e3f2bb8beb0f4986b90f022909b93133, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/4cbd40fe6a374e3f93d0cde2f3098060, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/724bab43217749b28e265e26166fcc9c, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/c6aad5c760164f7ebd9262bcc5e73afa] to archive 2024-11-26T10:33:46,423 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-26T10:33:46,425 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/398abb2cc49d48718b6d75448f7cda1d to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/398abb2cc49d48718b6d75448f7cda1d 2024-11-26T10:33:46,426 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/e808e7ecb9e140b495c66e945bf9f70f to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/e808e7ecb9e140b495c66e945bf9f70f 2024-11-26T10:33:46,428 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/f3be9e6d81134218a818f3f35e0aff68 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/f3be9e6d81134218a818f3f35e0aff68 2024-11-26T10:33:46,429 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/b8ded2282a824f279bd554ad8b5b596e to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/b8ded2282a824f279bd554ad8b5b596e 2024-11-26T10:33:46,430 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/4f1af6f0d83d4affa902d16a6112a0a9 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/4f1af6f0d83d4affa902d16a6112a0a9 2024-11-26T10:33:46,431 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/1c0333ef61dc49cb8ce8fa3826c53d88 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/1c0333ef61dc49cb8ce8fa3826c53d88 2024-11-26T10:33:46,432 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/369e824438d6472b8bd73d75037d37e8 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/369e824438d6472b8bd73d75037d37e8 2024-11-26T10:33:46,433 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/42976dfb274049108aa5da1aeb31a7f9 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/42976dfb274049108aa5da1aeb31a7f9 2024-11-26T10:33:46,434 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/e66901f60da3432d84657cf6f1a5ca37 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/e66901f60da3432d84657cf6f1a5ca37 2024-11-26T10:33:46,435 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/a4c067f22df4485a9f25a940e7d5b06a to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/a4c067f22df4485a9f25a940e7d5b06a 2024-11-26T10:33:46,436 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/8c5790d18b9a4f79913bca1faa0afde1 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/8c5790d18b9a4f79913bca1faa0afde1 2024-11-26T10:33:46,437 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/f52af82ae0c54109ada088bf62ad123f to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/f52af82ae0c54109ada088bf62ad123f 2024-11-26T10:33:46,439 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/543aedca104e48679af3fd3ad13824cf to 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/543aedca104e48679af3fd3ad13824cf 2024-11-26T10:33:46,440 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/253e67d32e0e400c947f5ebf79a9fc40 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/253e67d32e0e400c947f5ebf79a9fc40 2024-11-26T10:33:46,441 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/bab66df00b63458494b13f6245dbc828 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/bab66df00b63458494b13f6245dbc828 2024-11-26T10:33:46,442 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/dac6b4717845476e817110decb445c14 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/dac6b4717845476e817110decb445c14 2024-11-26T10:33:46,443 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/14194599004b4968a5cb09dbfc0b4131 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/14194599004b4968a5cb09dbfc0b4131 2024-11-26T10:33:46,444 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/de0c65b0449c4c43bea280c0a1bff8b0 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/de0c65b0449c4c43bea280c0a1bff8b0 2024-11-26T10:33:46,445 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/526147ad62ac4068b33cad0b3992b418 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/526147ad62ac4068b33cad0b3992b418 2024-11-26T10:33:46,446 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/04afbd1628684434a808dbd6712ce879 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/04afbd1628684434a808dbd6712ce879 2024-11-26T10:33:46,447 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/393b54b400a748d18a5ed6a8dd7703a6 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/393b54b400a748d18a5ed6a8dd7703a6 2024-11-26T10:33:46,448 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/1cdf0569dda24789b272489aae3b46a3 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/1cdf0569dda24789b272489aae3b46a3 2024-11-26T10:33:46,449 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/8cda4f8edc804cf796faff52b716b987 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/8cda4f8edc804cf796faff52b716b987 2024-11-26T10:33:46,450 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/96b00a2aa9384bab83cb3056fe3ed8fe to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/96b00a2aa9384bab83cb3056fe3ed8fe 2024-11-26T10:33:46,451 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/b6924dfb8ade4a229f09e981801ff2c4 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/b6924dfb8ade4a229f09e981801ff2c4 2024-11-26T10:33:46,452 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/e0b57dee62694719a9da03a7fc4059d4 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/e0b57dee62694719a9da03a7fc4059d4 2024-11-26T10:33:46,453 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/d57d32fc88ea4fd7a1d262e978f2e3f6 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/d57d32fc88ea4fd7a1d262e978f2e3f6 2024-11-26T10:33:46,454 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/1b76ceca4cf74046a6487002474c20fd to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/1b76ceca4cf74046a6487002474c20fd 2024-11-26T10:33:46,455 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/290e327985c0494cb04527b2de3f45fb to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/290e327985c0494cb04527b2de3f45fb 2024-11-26T10:33:46,456 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/18050d6399b74bd0a59fc07358f7b6b7 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/18050d6399b74bd0a59fc07358f7b6b7 2024-11-26T10:33:46,457 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/a0f341cab710448eb964a1b9b9f9e25b to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/a0f341cab710448eb964a1b9b9f9e25b 2024-11-26T10:33:46,458 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/5df75b3c7c524d278476a1aebe06869e to 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/5df75b3c7c524d278476a1aebe06869e 2024-11-26T10:33:46,459 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/852eb318e4d74c25919bb5aa2f8e7fce to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/852eb318e4d74c25919bb5aa2f8e7fce 2024-11-26T10:33:46,460 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/cf1ce271423c49069168fa868877a4f7 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/cf1ce271423c49069168fa868877a4f7 2024-11-26T10:33:46,461 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/130adbcb49fe47e4ac3c6f0c44d6b793 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/130adbcb49fe47e4ac3c6f0c44d6b793 2024-11-26T10:33:46,463 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/6ab8a55f8d6048489d0d878eff2d758c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/6ab8a55f8d6048489d0d878eff2d758c 2024-11-26T10:33:46,464 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/fcbe5f18193741448fc1a22c146d5df3 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/fcbe5f18193741448fc1a22c146d5df3 2024-11-26T10:33:46,465 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/e3f2bb8beb0f4986b90f022909b93133 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/e3f2bb8beb0f4986b90f022909b93133 2024-11-26T10:33:46,466 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/4cbd40fe6a374e3f93d0cde2f3098060 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/4cbd40fe6a374e3f93d0cde2f3098060 2024-11-26T10:33:46,467 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/724bab43217749b28e265e26166fcc9c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/724bab43217749b28e265e26166fcc9c 2024-11-26T10:33:46,468 DEBUG [StoreCloser-TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/c6aad5c760164f7ebd9262bcc5e73afa to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/c6aad5c760164f7ebd9262bcc5e73afa 2024-11-26T10:33:46,474 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/recovered.edits/606.seqid, newMaxSeqId=606, maxSeqId=1 2024-11-26T10:33:46,476 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad. 
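The StoreCloser and HFileArchiver entries above show each store file being moved from the region's data directory into the parallel archive directory rather than deleted in place. A minimal sketch of that data-to-archive move using the Hadoop FileSystem API is shown below; the helper name and paths are illustrative only and are not HBase's actual HFileArchiver internals.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveSketch {
  // Hypothetical helper: re-root a store file from <root>/data/... to <root>/archive/data/...
  static void archiveStoreFile(FileSystem fs, Path rootDir, Path storeFile) throws IOException {
    Path dataDir = new Path(rootDir, "data");
    // Path of the file relative to the data directory (table/region/family/file).
    String relative = storeFile.toUri().getPath()
        .substring(dataDir.toUri().getPath().length() + 1);
    Path archived = new Path(new Path(rootDir, "archive/data"), relative);

    fs.mkdirs(archived.getParent());         // ensure archive/data/<table>/<region>/<family> exists
    if (!fs.rename(storeFile, archived)) {   // move, not copy: the archive becomes the only copy
      throw new IOException("Failed to archive " + storeFile + " to " + archived);
    }
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // Example values mirroring the log's test-data root and one archived store file.
    Path root = new Path("hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1");
    Path hfile = new Path(root,
        "data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/e0b57dee62694719a9da03a7fc4059d4");
    archiveStoreFile(fs, root, hfile);
  }
}
```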
2024-11-26T10:33:46,476 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1635): Region close journal for 6a931c8e80842c8947954ecd8357e9ad: 2024-11-26T10:33:46,478 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] handler.UnassignRegionHandler(170): Closed 6a931c8e80842c8947954ecd8357e9ad 2024-11-26T10:33:46,479 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=38 updating hbase:meta row=6a931c8e80842c8947954ecd8357e9ad, regionState=CLOSED 2024-11-26T10:33:46,482 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=39, resume processing ppid=38 2024-11-26T10:33:46,482 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, ppid=38, state=SUCCESS; CloseRegionProcedure 6a931c8e80842c8947954ecd8357e9ad, server=ccf62758a0a5,45419,1732617185877 in 1.6180 sec 2024-11-26T10:33:46,483 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=38, resume processing ppid=37 2024-11-26T10:33:46,483 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, ppid=37, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=6a931c8e80842c8947954ecd8357e9ad, UNASSIGN in 1.6240 sec 2024-11-26T10:33:46,485 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=37, resume processing ppid=36 2024-11-26T10:33:46,485 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, ppid=36, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.6320 sec 2024-11-26T10:33:46,486 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732617226486"}]},"ts":"1732617226486"} 2024-11-26T10:33:46,488 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-26T10:33:46,529 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-26T10:33:46,531 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.7230 sec 2024-11-26T10:33:46,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-26T10:33:46,922 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 36 completed 2024-11-26T10:33:46,930 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-26T10:33:46,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=40, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-26T10:33:46,935 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=40, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-26T10:33:46,936 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=40, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-26T10:33:46,936 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=40 2024-11-26T10:33:46,939 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad 2024-11-26T10:33:46,943 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A, FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B, FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C, FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/recovered.edits] 2024-11-26T10:33:46,947 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/223373147862429c9eca07260443d362 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/223373147862429c9eca07260443d362 2024-11-26T10:33:46,949 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/54178a599846437ba70e2c45b326ded5 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/A/54178a599846437ba70e2c45b326ded5 2024-11-26T10:33:46,952 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/20af0c1aa8544bcb81d07f7c9153b71d to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/20af0c1aa8544bcb81d07f7c9153b71d 2024-11-26T10:33:46,953 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/9bd0212936744afbb1eef4588f55a74e to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/B/9bd0212936744afbb1eef4588f55a74e 2024-11-26T10:33:46,956 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/af362d86d9f74a009bbd0a0a1f0d1133 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/af362d86d9f74a009bbd0a0a1f0d1133 
2024-11-26T10:33:46,957 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/fe88e3207b9d4153a5b47e086cfa606a to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/C/fe88e3207b9d4153a5b47e086cfa606a 2024-11-26T10:33:46,960 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/recovered.edits/606.seqid to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad/recovered.edits/606.seqid 2024-11-26T10:33:46,960 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/6a931c8e80842c8947954ecd8357e9ad 2024-11-26T10:33:46,960 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-26T10:33:46,965 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=40, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-26T10:33:46,969 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-11-26T10:33:46,971 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-26T10:33:47,003 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-26T10:33:47,005 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=40, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-26T10:33:47,005 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-26T10:33:47,005 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732617227005"}]},"ts":"9223372036854775807"} 2024-11-26T10:33:47,008 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-26T10:33:47,008 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 6a931c8e80842c8947954ecd8357e9ad, NAME => 'TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad.', STARTKEY => '', ENDKEY => ''}] 2024-11-26T10:33:47,008 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
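The client side of this sequence, visible in the HBaseAdmin$TableFuture lines, is the usual disable-then-delete call pair; the DisableTableProcedure (pid=36) and DeleteTableProcedure (pid=40) entries are the server-side counterparts. A minimal sketch against the standard HBase 2.x Admin API, assuming an ordinary client configuration rather than the test harness, might look like:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();   // picks up hbase-site.xml / ZooKeeper quorum
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      if (admin.tableExists(table)) {
        if (admin.isTableEnabled(table)) {
          admin.disableTable(table);   // server side: DisableTableProcedure closes and unassigns regions
        }
        admin.deleteTable(table);      // server side: DeleteTableProcedure archives regions, then cleans hbase:meta
      }
    }
  }
}
```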
2024-11-26T10:33:47,009 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732617227008"}]},"ts":"9223372036854775807"} 2024-11-26T10:33:47,011 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-26T10:33:47,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=40 2024-11-26T10:33:47,047 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=40, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-26T10:33:47,048 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 116 msec 2024-11-26T10:33:47,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=40 2024-11-26T10:33:47,239 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 40 completed 2024-11-26T10:33:47,258 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=238 (was 219) Potentially hanging thread: hconnection-0x19eefe61-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x19eefe61-shared-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1831652990_22 at /127.0.0.1:53528 [Waiting for operation #13] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS:0;ccf62758a0a5:45419-shortCompactions-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.PriorityBlockingQueue.take(PriorityBlockingQueue.java:535) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x19eefe61-shared-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x19eefe61-shared-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=453 (was 444) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=320 (was 278) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=5417 (was 5963) 2024-11-26T10:33:47,268 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=238, OpenFileDescriptor=453, MaxFileDescriptor=1048576, SystemLoadAverage=320, ProcessCount=11, AvailableMemoryMB=5417 2024-11-26T10:33:47,270 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-26T10:33:47,270 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-26T10:33:47,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=41, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-26T10:33:47,272 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-26T10:33:47,272 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:47,272 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 41 2024-11-26T10:33:47,273 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-26T10:33:47,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-11-26T10:33:47,280 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741970_1146 (size=963) 2024-11-26T10:33:47,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-11-26T10:33:47,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-11-26T10:33:47,686 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1 2024-11-26T10:33:47,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741971_1147 (size=53) 2024-11-26T10:33:47,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-11-26T10:33:48,095 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:33:48,095 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing e4978131eaad54a00cbbf3f245fd971c, disabling compactions & flushes 2024-11-26T10:33:48,095 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:33:48,095 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:33:48,095 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 
after waiting 0 ms 2024-11-26T10:33:48,095 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:33:48,096 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:33:48,096 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for e4978131eaad54a00cbbf3f245fd971c: 2024-11-26T10:33:48,098 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-26T10:33:48,098 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732617228098"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732617228098"}]},"ts":"1732617228098"} 2024-11-26T10:33:48,100 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-26T10:33:48,102 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-26T10:33:48,102 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732617228102"}]},"ts":"1732617228102"} 2024-11-26T10:33:48,104 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-26T10:33:48,146 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=e4978131eaad54a00cbbf3f245fd971c, ASSIGN}] 2024-11-26T10:33:48,148 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=e4978131eaad54a00cbbf3f245fd971c, ASSIGN 2024-11-26T10:33:48,149 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=e4978131eaad54a00cbbf3f245fd971c, ASSIGN; state=OFFLINE, location=ccf62758a0a5,45419,1732617185877; forceNewPlan=false, retain=false 2024-11-26T10:33:48,300 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=e4978131eaad54a00cbbf3f245fd971c, regionState=OPENING, regionLocation=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:48,304 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=43, ppid=42, state=RUNNABLE; OpenRegionProcedure e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877}] 2024-11-26T10:33:48,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-11-26T10:33:48,458 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin 
connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:48,465 INFO [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:33:48,465 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(7285): Opening region: {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} 2024-11-26T10:33:48,466 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:33:48,466 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:33:48,466 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(7327): checking encryption for e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:33:48,466 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(7330): checking classloading for e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:33:48,468 INFO [StoreOpener-e4978131eaad54a00cbbf3f245fd971c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:33:48,471 INFO [StoreOpener-e4978131eaad54a00cbbf3f245fd971c-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-26T10:33:48,471 INFO [StoreOpener-e4978131eaad54a00cbbf3f245fd971c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e4978131eaad54a00cbbf3f245fd971c columnFamilyName A 2024-11-26T10:33:48,471 DEBUG [StoreOpener-e4978131eaad54a00cbbf3f245fd971c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:48,472 INFO [StoreOpener-e4978131eaad54a00cbbf3f245fd971c-1 {}] regionserver.HStore(327): Store=e4978131eaad54a00cbbf3f245fd971c/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 
2024-11-26T10:33:48,472 INFO [StoreOpener-e4978131eaad54a00cbbf3f245fd971c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:33:48,474 INFO [StoreOpener-e4978131eaad54a00cbbf3f245fd971c-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-26T10:33:48,475 INFO [StoreOpener-e4978131eaad54a00cbbf3f245fd971c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e4978131eaad54a00cbbf3f245fd971c columnFamilyName B 2024-11-26T10:33:48,475 DEBUG [StoreOpener-e4978131eaad54a00cbbf3f245fd971c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:48,476 INFO [StoreOpener-e4978131eaad54a00cbbf3f245fd971c-1 {}] regionserver.HStore(327): Store=e4978131eaad54a00cbbf3f245fd971c/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:33:48,476 INFO [StoreOpener-e4978131eaad54a00cbbf3f245fd971c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:33:48,477 INFO [StoreOpener-e4978131eaad54a00cbbf3f245fd971c-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-26T10:33:48,477 INFO [StoreOpener-e4978131eaad54a00cbbf3f245fd971c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e4978131eaad54a00cbbf3f245fd971c columnFamilyName C 2024-11-26T10:33:48,477 DEBUG [StoreOpener-e4978131eaad54a00cbbf3f245fd971c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:48,478 INFO [StoreOpener-e4978131eaad54a00cbbf3f245fd971c-1 {}] regionserver.HStore(327): Store=e4978131eaad54a00cbbf3f245fd971c/C, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:33:48,478 INFO [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:33:48,479 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:33:48,480 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:33:48,481 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-26T10:33:48,483 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(1085): writing seq id for e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:33:48,485 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-26T10:33:48,486 INFO [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(1102): Opened e4978131eaad54a00cbbf3f245fd971c; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60090994, jitterRate=-0.10457441210746765}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-26T10:33:48,487 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(1001): Region open journal for e4978131eaad54a00cbbf3f245fd971c: 2024-11-26T10:33:48,488 INFO [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c., pid=43, masterSystemTime=1732617228458 2024-11-26T10:33:48,489 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:33:48,490 INFO [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 
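At this point the region is open on ccf62758a0a5,45419 and, just below, the CreateTableProcedure (pid=41) finishes and the client sees "Operation: CREATE ... completed". The store-open entries above also show each family backed by a CompactingMemStore with the ADAPTIVE compactor. A minimal sketch of creating a table configured that way through the HBase 2.x client API follows; it is not taken from the TestAcidGuarantees source, and the class name CreateAcidGuaranteesTable is an assumption:

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateAcidGuaranteesTable {
  public static void main(String[] args) throws IOException {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder tdb =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
      for (String family : new String[] { "A", "B", "C" }) {
        tdb.setColumnFamily(
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                .setMaxVersions(1) // VERSIONS => '1' in the descriptor logged below
                // ADAPTIVE in-memory compaction yields the CompactingMemStore
                // seen in the store-open entries above.
                .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
                .build());
      }
      // createTable blocks until the master's CreateTableProcedure completes,
      // which is why the client-side "CREATE ... completed" line only appears
      // once pid=41 finishes.
      admin.createTable(tdb.build());
    }
  }
}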
2024-11-26T10:33:48,490 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=e4978131eaad54a00cbbf3f245fd971c, regionState=OPEN, openSeqNum=2, regionLocation=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:48,493 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=43, resume processing ppid=42 2024-11-26T10:33:48,493 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, ppid=42, state=SUCCESS; OpenRegionProcedure e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 in 187 msec 2024-11-26T10:33:48,494 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=42, resume processing ppid=41 2024-11-26T10:33:48,494 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, ppid=41, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=e4978131eaad54a00cbbf3f245fd971c, ASSIGN in 347 msec 2024-11-26T10:33:48,495 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-26T10:33:48,495 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732617228495"}]},"ts":"1732617228495"} 2024-11-26T10:33:48,496 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-26T10:33:48,535 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-26T10:33:48,537 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2650 sec 2024-11-26T10:33:49,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-11-26T10:33:49,385 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 41 completed 2024-11-26T10:33:49,391 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x04506927 to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7a9b9802 2024-11-26T10:33:49,456 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@118b007e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:33:49,461 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:33:49,465 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55172, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:33:49,467 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-26T10:33:49,469 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37280, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-26T10:33:49,474 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-26T10:33:49,474 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-26T10:33:49,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=44, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-26T10:33:49,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741972_1148 (size=999) 2024-11-26T10:33:49,898 DEBUG [PEWorker-3 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-11-26T10:33:49,898 INFO [PEWorker-3 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-11-26T10:33:49,905 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=45, ppid=44, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-26T10:33:49,915 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=e4978131eaad54a00cbbf3f245fd971c, REOPEN/MOVE}] 2024-11-26T10:33:49,916 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=e4978131eaad54a00cbbf3f245fd971c, REOPEN/MOVE 2024-11-26T10:33:49,916 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=46 updating hbase:meta row=e4978131eaad54a00cbbf3f245fd971c, regionState=CLOSING, regionLocation=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:49,917 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-26T10:33:49,917 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=47, ppid=46, state=RUNNABLE; CloseRegionProcedure e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877}] 2024-11-26T10:33:50,069 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:50,070 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] handler.UnassignRegionHandler(124): Close e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:33:50,070 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-26T10:33:50,070 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegion(1681): Closing e4978131eaad54a00cbbf3f245fd971c, disabling compactions & flushes 2024-11-26T10:33:50,071 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:33:50,071 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:33:50,071 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. after waiting 0 ms 2024-11-26T10:33:50,071 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 
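The modify request logged above rewrites family A with IS_MOB => 'true' and MOB_THRESHOLD => '4'; the master applies it through ModifyTableProcedure (pid=44) and a ReopenTableRegionsProcedure (pid=45) that closes and reopens the region, as the surrounding entries show. A hedged sketch of issuing such a change with the Admin API (the class name EnableMobOnFamilyA is hypothetical; the builder calls are standard HBase 2.x client methods):

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class EnableMobOnFamilyA {
  public static void main(String[] args) throws IOException {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptor current = admin.getDescriptor(table);
      ColumnFamilyDescriptor newA = ColumnFamilyDescriptorBuilder
          .newBuilder(current.getColumnFamily(Bytes.toBytes("A")))
          .setMobEnabled(true)   // IS_MOB => 'true'
          .setMobThreshold(4L)   // MOB_THRESHOLD => '4'
          .build();
      TableDescriptor modified = TableDescriptorBuilder
          .newBuilder(current)
          .modifyColumnFamily(newA)
          .build();
      // Triggers a ModifyTableProcedure plus a ReopenTableRegionsProcedure,
      // as with pids 44-48 in this log.
      admin.modifyTable(modified);
    }
  }
}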
2024-11-26T10:33:50,080 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-26T10:33:50,080 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:33:50,080 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegion(1635): Region close journal for e4978131eaad54a00cbbf3f245fd971c: 2024-11-26T10:33:50,081 WARN [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegionServer(3786): Not adding moved region record: e4978131eaad54a00cbbf3f245fd971c to self. 2024-11-26T10:33:50,082 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] handler.UnassignRegionHandler(170): Closed e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:33:50,083 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=46 updating hbase:meta row=e4978131eaad54a00cbbf3f245fd971c, regionState=CLOSED 2024-11-26T10:33:50,086 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=47, resume processing ppid=46 2024-11-26T10:33:50,086 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, ppid=46, state=SUCCESS; CloseRegionProcedure e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 in 167 msec 2024-11-26T10:33:50,087 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=e4978131eaad54a00cbbf3f245fd971c, REOPEN/MOVE; state=CLOSED, location=ccf62758a0a5,45419,1732617185877; forceNewPlan=false, retain=true 2024-11-26T10:33:50,237 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=46 updating hbase:meta row=e4978131eaad54a00cbbf3f245fd971c, regionState=OPENING, regionLocation=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:50,239 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=48, ppid=46, state=RUNNABLE; OpenRegionProcedure e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877}] 2024-11-26T10:33:50,391 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:50,397 INFO [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 
2024-11-26T10:33:50,397 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7285): Opening region: {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} 2024-11-26T10:33:50,398 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:33:50,398 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:33:50,398 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7327): checking encryption for e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:33:50,399 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7330): checking classloading for e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:33:50,403 INFO [StoreOpener-e4978131eaad54a00cbbf3f245fd971c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:33:50,404 INFO [StoreOpener-e4978131eaad54a00cbbf3f245fd971c-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-26T10:33:50,411 INFO [StoreOpener-e4978131eaad54a00cbbf3f245fd971c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e4978131eaad54a00cbbf3f245fd971c columnFamilyName A 2024-11-26T10:33:50,414 DEBUG [StoreOpener-e4978131eaad54a00cbbf3f245fd971c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:50,414 INFO [StoreOpener-e4978131eaad54a00cbbf3f245fd971c-1 {}] regionserver.HStore(327): Store=e4978131eaad54a00cbbf3f245fd971c/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:33:50,415 INFO [StoreOpener-e4978131eaad54a00cbbf3f245fd971c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:33:50,416 INFO [StoreOpener-e4978131eaad54a00cbbf3f245fd971c-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-26T10:33:50,416 INFO [StoreOpener-e4978131eaad54a00cbbf3f245fd971c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e4978131eaad54a00cbbf3f245fd971c columnFamilyName B 2024-11-26T10:33:50,417 DEBUG [StoreOpener-e4978131eaad54a00cbbf3f245fd971c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:50,417 INFO [StoreOpener-e4978131eaad54a00cbbf3f245fd971c-1 {}] regionserver.HStore(327): Store=e4978131eaad54a00cbbf3f245fd971c/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:33:50,417 INFO [StoreOpener-e4978131eaad54a00cbbf3f245fd971c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:33:50,418 INFO [StoreOpener-e4978131eaad54a00cbbf3f245fd971c-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-26T10:33:50,418 INFO [StoreOpener-e4978131eaad54a00cbbf3f245fd971c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e4978131eaad54a00cbbf3f245fd971c columnFamilyName C 2024-11-26T10:33:50,418 DEBUG [StoreOpener-e4978131eaad54a00cbbf3f245fd971c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:50,419 INFO [StoreOpener-e4978131eaad54a00cbbf3f245fd971c-1 {}] regionserver.HStore(327): Store=e4978131eaad54a00cbbf3f245fd971c/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:33:50,419 INFO [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:33:50,420 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:33:50,421 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:33:50,422 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-26T10:33:50,424 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1085): writing seq id for e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:33:50,424 INFO [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1102): Opened e4978131eaad54a00cbbf3f245fd971c; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60426611, jitterRate=-0.09957332909107208}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-26T10:33:50,426 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1001): Region open journal for e4978131eaad54a00cbbf3f245fd971c: 2024-11-26T10:33:50,426 INFO [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c., pid=48, masterSystemTime=1732617230390 2024-11-26T10:33:50,428 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:33:50,428 INFO [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 
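With the region reopened (next sequenceid=5 above), the test opens several client connections and asks the master to flush TestAcidGuarantees; the HMaster$22 "flush" entry a few lines below becomes FlushTableProcedure pid=49. A client-side flush request might look like the following hedged sketch (the class name FlushAcidTable is an assumption; Admin.flush is the standard call):

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAcidTable {
  public static void main(String[] args) throws IOException {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; the master runs a
      // FlushTableProcedure with one FlushRegionProcedure subprocedure per region.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}

While the flush is in progress, writers that push the region past the blocking memstore limit receive RegionTooBusyException ("Over memstore limit=512.0 K"), as the WARN entries further below show; the HBase client treats this as a retryable condition, so the test's writer threads back off and try again.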
2024-11-26T10:33:50,428 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=46 updating hbase:meta row=e4978131eaad54a00cbbf3f245fd971c, regionState=OPEN, openSeqNum=5, regionLocation=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:50,430 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=48, resume processing ppid=46 2024-11-26T10:33:50,430 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=46, state=SUCCESS; OpenRegionProcedure e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 in 191 msec 2024-11-26T10:33:50,432 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=46, resume processing ppid=45 2024-11-26T10:33:50,432 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, ppid=45, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=e4978131eaad54a00cbbf3f245fd971c, REOPEN/MOVE in 515 msec 2024-11-26T10:33:50,435 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=45, resume processing ppid=44 2024-11-26T10:33:50,435 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, ppid=44, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 529 msec 2024-11-26T10:33:50,437 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 959 msec 2024-11-26T10:33:50,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=44 2024-11-26T10:33:50,443 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7362d978 to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7cae6c5c 2024-11-26T10:33:50,501 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c7d6279, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:33:50,504 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7bad2e85 to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5c820ef9 2024-11-26T10:33:50,522 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b55744e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:33:50,527 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7ebda6ad to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@b44b1e5 2024-11-26T10:33:50,538 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@454f1431, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:33:50,540 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x19a533a3 to 
127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@42e904d8 2024-11-26T10:33:50,551 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@769942d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:33:50,554 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x465dc764 to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@a4c53ed 2024-11-26T10:33:50,563 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@367f47f7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:33:50,566 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x68f0be85 to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@247c0c93 2024-11-26T10:33:50,577 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22e911df, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:33:50,580 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x152377d4 to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@517ff977 2024-11-26T10:33:50,593 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b727d6e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:33:50,596 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1a52344f to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3448d233 2024-11-26T10:33:50,605 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1c7940d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:33:50,609 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x08ba8425 to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7a11164b 2024-11-26T10:33:50,622 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c38ee58, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:33:50,628 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-26T10:33:50,628 DEBUG [hconnection-0x1e22067d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:33:50,628 DEBUG [hconnection-0x31153c1b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:33:50,628 DEBUG [hconnection-0x553fba0b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:33:50,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees 2024-11-26T10:33:50,629 DEBUG [hconnection-0x623f48b6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:33:50,630 DEBUG [hconnection-0x6ebb124e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:33:50,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-26T10:33:50,630 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-26T10:33:50,631 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55182, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:33:50,631 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55196, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:33:50,631 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55210, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:33:50,631 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-26T10:33:50,631 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55216, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:33:50,632 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55230, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:33:50,632 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-26T10:33:50,634 DEBUG [hconnection-0x3627e90e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:33:50,635 DEBUG [hconnection-0x78b8fca0-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=ClientService, sasl=false 2024-11-26T10:33:50,635 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55238, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:33:50,635 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55252, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:33:50,639 DEBUG [hconnection-0x3afb7273-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:33:50,639 DEBUG [hconnection-0x57c53fe4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:33:50,641 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55276, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:33:50,642 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55260, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:33:50,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:33:50,649 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e4978131eaad54a00cbbf3f245fd971c 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-26T10:33:50,650 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e4978131eaad54a00cbbf3f245fd971c, store=A 2024-11-26T10:33:50,650 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:50,650 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e4978131eaad54a00cbbf3f245fd971c, store=B 2024-11-26T10:33:50,650 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:50,650 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e4978131eaad54a00cbbf3f245fd971c, store=C 2024-11-26T10:33:50,650 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:50,691 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112641bd1bef83a749138606883b48ac5270_e4978131eaad54a00cbbf3f245fd971c is 50, key is test_row_0/A:col10/1732617230646/Put/seqid=0 2024-11-26T10:33:50,695 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:50,695 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:50,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617290687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:50,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617290688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:50,696 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:50,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617290695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:50,696 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:50,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55276 deadline: 1732617290695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:50,697 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:50,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617290695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:50,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741973_1149 (size=12154) 2024-11-26T10:33:50,723 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:50,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-26T10:33:50,734 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112641bd1bef83a749138606883b48ac5270_e4978131eaad54a00cbbf3f245fd971c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112641bd1bef83a749138606883b48ac5270_e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:33:50,735 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/b145f03845ef403381eac3537e8aa2fa, store: [table=TestAcidGuarantees family=A region=e4978131eaad54a00cbbf3f245fd971c] 2024-11-26T10:33:50,743 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/b145f03845ef403381eac3537e8aa2fa is 175, key is 
test_row_0/A:col10/1732617230646/Put/seqid=0 2024-11-26T10:33:50,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741974_1150 (size=30955) 2024-11-26T10:33:50,784 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:50,785 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-26T10:33:50,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:33:50,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. as already flushing 2024-11-26T10:33:50,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:33:50,785 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:50,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:50,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:50,800 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:50,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617290798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:50,800 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:50,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617290798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:50,801 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:50,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55276 deadline: 1732617290798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:50,801 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:50,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617290798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:50,802 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:50,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617290798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:50,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-26T10:33:50,938 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:50,938 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-26T10:33:50,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:33:50,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. as already flushing 2024-11-26T10:33:50,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:33:50,939 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:50,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:50,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:51,004 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:51,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617291002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:51,005 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:51,005 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:51,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617291003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:51,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617291004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:51,006 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:51,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617291004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:51,006 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:51,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55276 deadline: 1732617291004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:51,090 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:51,090 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-26T10:33:51,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:33:51,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. as already flushing 2024-11-26T10:33:51,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:33:51,091 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:51,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:51,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:51,150 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=17, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/b145f03845ef403381eac3537e8aa2fa 2024-11-26T10:33:51,175 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/448d01fd6aba407c93fc3348ebb7813a is 50, key is test_row_0/B:col10/1732617230646/Put/seqid=0 2024-11-26T10:33:51,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741975_1151 (size=12001) 2024-11-26T10:33:51,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-26T10:33:51,243 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:51,244 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-26T10:33:51,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:33:51,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 
as already flushing 2024-11-26T10:33:51,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:33:51,244 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:51,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:51,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:51,326 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:51,327 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:51,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617291326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:51,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617291326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:51,328 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:51,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55276 deadline: 1732617291327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:51,328 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:51,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617291328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:51,329 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:51,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617291328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:51,396 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:51,397 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-26T10:33:51,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:33:51,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. as already flushing 2024-11-26T10:33:51,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:33:51,398 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:51,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:51,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:51,550 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:51,550 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-26T10:33:51,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:33:51,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. as already flushing 2024-11-26T10:33:51,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:33:51,551 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:51,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:51,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:33:51,598 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/448d01fd6aba407c93fc3348ebb7813a 2024-11-26T10:33:51,629 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/eb3d2bcf46cc4e15b7fa4102c8274a4c is 50, key is test_row_0/C:col10/1732617230646/Put/seqid=0 2024-11-26T10:33:51,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741976_1152 (size=12001) 2024-11-26T10:33:51,635 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/eb3d2bcf46cc4e15b7fa4102c8274a4c 2024-11-26T10:33:51,641 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/b145f03845ef403381eac3537e8aa2fa as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/b145f03845ef403381eac3537e8aa2fa 2024-11-26T10:33:51,646 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/b145f03845ef403381eac3537e8aa2fa, entries=150, sequenceid=17, filesize=30.2 K 2024-11-26T10:33:51,647 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/448d01fd6aba407c93fc3348ebb7813a as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/448d01fd6aba407c93fc3348ebb7813a 2024-11-26T10:33:51,654 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/448d01fd6aba407c93fc3348ebb7813a, entries=150, sequenceid=17, filesize=11.7 K 2024-11-26T10:33:51,655 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/eb3d2bcf46cc4e15b7fa4102c8274a4c as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/eb3d2bcf46cc4e15b7fa4102c8274a4c 2024-11-26T10:33:51,662 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/eb3d2bcf46cc4e15b7fa4102c8274a4c, entries=150, sequenceid=17, filesize=11.7 K 2024-11-26T10:33:51,663 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=147.60 KB/151140 for e4978131eaad54a00cbbf3f245fd971c in 1014ms, sequenceid=17, compaction requested=false 2024-11-26T10:33:51,663 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e4978131eaad54a00cbbf3f245fd971c: 2024-11-26T10:33:51,703 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:51,704 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-26T10:33:51,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:33:51,704 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2837): Flushing e4978131eaad54a00cbbf3f245fd971c 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-26T10:33:51,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e4978131eaad54a00cbbf3f245fd971c, store=A 2024-11-26T10:33:51,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:51,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e4978131eaad54a00cbbf3f245fd971c, store=B 2024-11-26T10:33:51,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:51,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e4978131eaad54a00cbbf3f245fd971c, store=C 2024-11-26T10:33:51,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:51,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126484c40836afb4930930a1402183c9fad_e4978131eaad54a00cbbf3f245fd971c is 50, key is test_row_0/A:col10/1732617230688/Put/seqid=0 2024-11-26T10:33:51,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-26T10:33:51,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to 
blk_1073741977_1153 (size=12154) 2024-11-26T10:33:51,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:51,760 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126484c40836afb4930930a1402183c9fad_e4978131eaad54a00cbbf3f245fd971c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126484c40836afb4930930a1402183c9fad_e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:33:51,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/fdef71fb80894a42b52a9d81c44ab3ce, store: [table=TestAcidGuarantees family=A region=e4978131eaad54a00cbbf3f245fd971c] 2024-11-26T10:33:51,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/fdef71fb80894a42b52a9d81c44ab3ce is 175, key is test_row_0/A:col10/1732617230688/Put/seqid=0 2024-11-26T10:33:51,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741978_1154 (size=30955) 2024-11-26T10:33:51,778 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=42, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/fdef71fb80894a42b52a9d81c44ab3ce 2024-11-26T10:33:51,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/26ba8e1df3994313aff8833179e8a838 is 50, key is test_row_0/B:col10/1732617230688/Put/seqid=0 2024-11-26T10:33:51,802 INFO [master/ccf62758a0a5:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-26T10:33:51,802 INFO [master/ccf62758a0a5:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-11-26T10:33:51,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741979_1155 (size=12001) 2024-11-26T10:33:51,809 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/26ba8e1df3994313aff8833179e8a838 2024-11-26T10:33:51,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/2c55166e81d44f089c30fe6b35e4779f is 50, key is test_row_0/C:col10/1732617230688/Put/seqid=0 2024-11-26T10:33:51,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:33:51,834 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. as already flushing 2024-11-26T10:33:51,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741980_1156 (size=12001) 2024-11-26T10:33:51,842 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:51,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617291839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:51,843 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:51,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617291841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:51,843 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:51,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55276 deadline: 1732617291841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:51,844 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:51,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617291841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:51,844 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:51,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617291843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:51,946 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:51,946 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:51,946 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:51,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617291944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:51,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55276 deadline: 1732617291945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:51,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617291945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:51,946 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:51,946 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:51,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617291946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:51,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617291945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:52,030 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-26T10:33:52,149 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:52,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617292148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:52,149 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:52,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617292148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:52,150 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:52,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617292148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:52,150 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:52,150 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:52,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55276 deadline: 1732617292149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:52,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617292149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:52,236 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/2c55166e81d44f089c30fe6b35e4779f 2024-11-26T10:33:52,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/fdef71fb80894a42b52a9d81c44ab3ce as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/fdef71fb80894a42b52a9d81c44ab3ce 2024-11-26T10:33:52,249 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/fdef71fb80894a42b52a9d81c44ab3ce, entries=150, sequenceid=42, filesize=30.2 K 2024-11-26T10:33:52,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/26ba8e1df3994313aff8833179e8a838 as 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/26ba8e1df3994313aff8833179e8a838 2024-11-26T10:33:52,260 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/26ba8e1df3994313aff8833179e8a838, entries=150, sequenceid=42, filesize=11.7 K 2024-11-26T10:33:52,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/2c55166e81d44f089c30fe6b35e4779f as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/2c55166e81d44f089c30fe6b35e4779f 2024-11-26T10:33:52,270 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/2c55166e81d44f089c30fe6b35e4779f, entries=150, sequenceid=42, filesize=11.7 K 2024-11-26T10:33:52,271 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for e4978131eaad54a00cbbf3f245fd971c in 567ms, sequenceid=42, compaction requested=false 2024-11-26T10:33:52,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2538): Flush status journal for e4978131eaad54a00cbbf3f245fd971c: 2024-11-26T10:33:52,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 
2024-11-26T10:33:52,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=50 2024-11-26T10:33:52,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4106): Remote procedure done, pid=50 2024-11-26T10:33:52,276 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49 2024-11-26T10:33:52,276 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6420 sec 2024-11-26T10:33:52,279 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees in 1.6490 sec 2024-11-26T10:33:52,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:33:52,453 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e4978131eaad54a00cbbf3f245fd971c 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-26T10:33:52,453 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e4978131eaad54a00cbbf3f245fd971c, store=A 2024-11-26T10:33:52,453 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:52,453 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e4978131eaad54a00cbbf3f245fd971c, store=B 2024-11-26T10:33:52,453 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:52,453 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e4978131eaad54a00cbbf3f245fd971c, store=C 2024-11-26T10:33:52,453 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:52,466 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126b9ce016f5c2e44d6a37ea9f0257db73d_e4978131eaad54a00cbbf3f245fd971c is 50, key is test_row_0/A:col10/1732617231839/Put/seqid=0 2024-11-26T10:33:52,479 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:52,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617292474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:52,482 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:52,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617292478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:52,483 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:52,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741981_1157 (size=14594) 2024-11-26T10:33:52,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55276 deadline: 1732617292478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:52,485 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:52,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617292479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:52,486 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:52,486 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:52,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617292479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:52,491 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126b9ce016f5c2e44d6a37ea9f0257db73d_e4978131eaad54a00cbbf3f245fd971c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126b9ce016f5c2e44d6a37ea9f0257db73d_e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:33:52,493 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/b909f922f0874cc6990f6f8d17f32bfd, store: [table=TestAcidGuarantees family=A region=e4978131eaad54a00cbbf3f245fd971c] 2024-11-26T10:33:52,494 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/b909f922f0874cc6990f6f8d17f32bfd is 175, key is test_row_0/A:col10/1732617231839/Put/seqid=0 2024-11-26T10:33:52,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741982_1158 (size=39549) 2024-11-26T10:33:52,507 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=55, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/b909f922f0874cc6990f6f8d17f32bfd 2024-11-26T10:33:52,521 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/10a097e263ce45b2aa647c139a50972a is 50, key is test_row_0/B:col10/1732617231839/Put/seqid=0 2024-11-26T10:33:52,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741983_1159 (size=12001) 2024-11-26T10:33:52,540 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=55 (bloomFilter=true), 
to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/10a097e263ce45b2aa647c139a50972a 2024-11-26T10:33:52,552 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/c4f9dfbcdaef4acfad4a92172c4ee9fc is 50, key is test_row_0/C:col10/1732617231839/Put/seqid=0 2024-11-26T10:33:52,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741984_1160 (size=12001) 2024-11-26T10:33:52,565 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/c4f9dfbcdaef4acfad4a92172c4ee9fc 2024-11-26T10:33:52,575 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/b909f922f0874cc6990f6f8d17f32bfd as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/b909f922f0874cc6990f6f8d17f32bfd 2024-11-26T10:33:52,584 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:52,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617292581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:52,586 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/b909f922f0874cc6990f6f8d17f32bfd, entries=200, sequenceid=55, filesize=38.6 K 2024-11-26T10:33:52,587 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:52,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617292584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:52,590 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:52,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617292588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:52,590 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:52,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55276 deadline: 1732617292588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:52,590 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:52,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617292588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:52,594 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/10a097e263ce45b2aa647c139a50972a as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/10a097e263ce45b2aa647c139a50972a 2024-11-26T10:33:52,602 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/10a097e263ce45b2aa647c139a50972a, entries=150, sequenceid=55, filesize=11.7 K 2024-11-26T10:33:52,604 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/c4f9dfbcdaef4acfad4a92172c4ee9fc as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/c4f9dfbcdaef4acfad4a92172c4ee9fc 2024-11-26T10:33:52,609 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/c4f9dfbcdaef4acfad4a92172c4ee9fc, entries=150, sequenceid=55, filesize=11.7 K 2024-11-26T10:33:52,610 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for e4978131eaad54a00cbbf3f245fd971c in 157ms, sequenceid=55, compaction requested=true 2024-11-26T10:33:52,610 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e4978131eaad54a00cbbf3f245fd971c: 2024-11-26T10:33:52,611 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:33:52,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e4978131eaad54a00cbbf3f245fd971c:A, priority=-2147483648, current under compaction store size is 1 2024-11-26T10:33:52,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:33:52,612 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:33:52,613 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101459 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:33:52,613 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): e4978131eaad54a00cbbf3f245fd971c/A is initiating minor compaction (all files) 2024-11-26T10:33:52,613 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e4978131eaad54a00cbbf3f245fd971c/A in TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:33:52,613 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/b145f03845ef403381eac3537e8aa2fa, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/fdef71fb80894a42b52a9d81c44ab3ce, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/b909f922f0874cc6990f6f8d17f32bfd] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp, totalSize=99.1 K 2024-11-26T10:33:52,613 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:33:52,613 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 
files: [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/b145f03845ef403381eac3537e8aa2fa, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/fdef71fb80894a42b52a9d81c44ab3ce, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/b909f922f0874cc6990f6f8d17f32bfd] 2024-11-26T10:33:52,614 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting b145f03845ef403381eac3537e8aa2fa, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732617230639 2024-11-26T10:33:52,614 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e4978131eaad54a00cbbf3f245fd971c:B, priority=-2147483648, current under compaction store size is 2 2024-11-26T10:33:52,614 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:33:52,614 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e4978131eaad54a00cbbf3f245fd971c:C, priority=-2147483648, current under compaction store size is 3 2024-11-26T10:33:52,614 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:33:52,615 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:33:52,615 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): e4978131eaad54a00cbbf3f245fd971c/B is initiating minor compaction (all files) 2024-11-26T10:33:52,615 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e4978131eaad54a00cbbf3f245fd971c/B in TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 
2024-11-26T10:33:52,615 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/448d01fd6aba407c93fc3348ebb7813a, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/26ba8e1df3994313aff8833179e8a838, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/10a097e263ce45b2aa647c139a50972a] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp, totalSize=35.2 K 2024-11-26T10:33:52,615 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting fdef71fb80894a42b52a9d81c44ab3ce, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1732617230681 2024-11-26T10:33:52,616 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting b909f922f0874cc6990f6f8d17f32bfd, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732617231839 2024-11-26T10:33:52,616 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 448d01fd6aba407c93fc3348ebb7813a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732617230639 2024-11-26T10:33:52,616 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 26ba8e1df3994313aff8833179e8a838, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1732617230681 2024-11-26T10:33:52,616 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 10a097e263ce45b2aa647c139a50972a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732617231839 2024-11-26T10:33:52,625 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=e4978131eaad54a00cbbf3f245fd971c] 2024-11-26T10:33:52,642 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e4978131eaad54a00cbbf3f245fd971c#B#compaction#141 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:33:52,642 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/91dbf6950ccd4730a5ec2e2d0df045f2 is 50, key is test_row_0/B:col10/1732617231839/Put/seqid=0 2024-11-26T10:33:52,651 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112666752ed7c26849449e541beb0729018b_e4978131eaad54a00cbbf3f245fd971c store=[table=TestAcidGuarantees family=A region=e4978131eaad54a00cbbf3f245fd971c] 2024-11-26T10:33:52,656 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112666752ed7c26849449e541beb0729018b_e4978131eaad54a00cbbf3f245fd971c, store=[table=TestAcidGuarantees family=A region=e4978131eaad54a00cbbf3f245fd971c] 2024-11-26T10:33:52,656 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112666752ed7c26849449e541beb0729018b_e4978131eaad54a00cbbf3f245fd971c because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=e4978131eaad54a00cbbf3f245fd971c] 2024-11-26T10:33:52,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741986_1162 (size=4469) 2024-11-26T10:33:52,671 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e4978131eaad54a00cbbf3f245fd971c#A#compaction#140 average throughput is 0.54 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:33:52,673 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/80002b85d97c4cd184681b8bbf1d2652 is 175, key is test_row_0/A:col10/1732617231839/Put/seqid=0 2024-11-26T10:33:52,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741985_1161 (size=12104) 2024-11-26T10:33:52,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741987_1163 (size=31058) 2024-11-26T10:33:52,703 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/80002b85d97c4cd184681b8bbf1d2652 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/80002b85d97c4cd184681b8bbf1d2652 2024-11-26T10:33:52,709 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e4978131eaad54a00cbbf3f245fd971c/A of e4978131eaad54a00cbbf3f245fd971c into 80002b85d97c4cd184681b8bbf1d2652(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:33:52,709 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e4978131eaad54a00cbbf3f245fd971c: 2024-11-26T10:33:52,709 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c., storeName=e4978131eaad54a00cbbf3f245fd971c/A, priority=13, startTime=1732617232611; duration=0sec 2024-11-26T10:33:52,709 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:33:52,709 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e4978131eaad54a00cbbf3f245fd971c:A 2024-11-26T10:33:52,710 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:33:52,712 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:33:52,712 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): e4978131eaad54a00cbbf3f245fd971c/C is initiating minor compaction (all files) 2024-11-26T10:33:52,712 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e4978131eaad54a00cbbf3f245fd971c/C in TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 
2024-11-26T10:33:52,712 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/eb3d2bcf46cc4e15b7fa4102c8274a4c, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/2c55166e81d44f089c30fe6b35e4779f, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/c4f9dfbcdaef4acfad4a92172c4ee9fc] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp, totalSize=35.2 K 2024-11-26T10:33:52,713 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting eb3d2bcf46cc4e15b7fa4102c8274a4c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732617230639 2024-11-26T10:33:52,714 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2c55166e81d44f089c30fe6b35e4779f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1732617230681 2024-11-26T10:33:52,714 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting c4f9dfbcdaef4acfad4a92172c4ee9fc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732617231839 2024-11-26T10:33:52,726 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e4978131eaad54a00cbbf3f245fd971c#C#compaction#142 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:33:52,726 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/3ea95a9d322e410cb4d1d9c9f673f952 is 50, key is test_row_0/C:col10/1732617231839/Put/seqid=0 2024-11-26T10:33:52,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-26T10:33:52,735 INFO [Thread-728 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 49 completed 2024-11-26T10:33:52,737 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-26T10:33:52,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees 2024-11-26T10:33:52,739 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-26T10:33:52,740 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-26T10:33:52,740 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=52, ppid=51, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-26T10:33:52,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-26T10:33:52,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741988_1164 (size=12104) 2024-11-26T10:33:52,762 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/3ea95a9d322e410cb4d1d9c9f673f952 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/3ea95a9d322e410cb4d1d9c9f673f952 2024-11-26T10:33:52,774 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e4978131eaad54a00cbbf3f245fd971c/C of e4978131eaad54a00cbbf3f245fd971c into 3ea95a9d322e410cb4d1d9c9f673f952(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-26T10:33:52,774 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e4978131eaad54a00cbbf3f245fd971c: 2024-11-26T10:33:52,774 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c., storeName=e4978131eaad54a00cbbf3f245fd971c/C, priority=13, startTime=1732617232614; duration=0sec 2024-11-26T10:33:52,774 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:33:52,775 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e4978131eaad54a00cbbf3f245fd971c:C 2024-11-26T10:33:52,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:33:52,789 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e4978131eaad54a00cbbf3f245fd971c 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-26T10:33:52,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e4978131eaad54a00cbbf3f245fd971c, store=A 2024-11-26T10:33:52,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:52,790 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e4978131eaad54a00cbbf3f245fd971c, store=B 2024-11-26T10:33:52,790 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:52,790 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e4978131eaad54a00cbbf3f245fd971c, store=C 2024-11-26T10:33:52,790 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:52,803 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126067ab4e266e5453da817efae0a9f1066_e4978131eaad54a00cbbf3f245fd971c is 50, key is test_row_0/A:col10/1732617232471/Put/seqid=0 2024-11-26T10:33:52,809 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:52,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617292803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:52,809 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:52,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617292804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:52,810 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:52,810 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:52,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617292804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:52,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55276 deadline: 1732617292805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:52,810 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:52,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617292806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:52,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741989_1165 (size=12154) 2024-11-26T10:33:52,828 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:52,833 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126067ab4e266e5453da817efae0a9f1066_e4978131eaad54a00cbbf3f245fd971c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126067ab4e266e5453da817efae0a9f1066_e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:33:52,835 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/b0de51c241964eaa80ea35658051c73e, store: [table=TestAcidGuarantees family=A region=e4978131eaad54a00cbbf3f245fd971c] 2024-11-26T10:33:52,835 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/b0de51c241964eaa80ea35658051c73e is 175, key is test_row_0/A:col10/1732617232471/Put/seqid=0 2024-11-26T10:33:52,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is 
added to blk_1073741990_1166 (size=30955) 2024-11-26T10:33:52,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-26T10:33:52,892 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:52,893 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-26T10:33:52,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:33:52,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. as already flushing 2024-11-26T10:33:52,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:33:52,893 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:52,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:52,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:52,910 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:52,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617292910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:52,912 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:52,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617292911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:52,913 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:52,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617292911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:52,914 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:52,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617292912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:52,914 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:52,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55276 deadline: 1732617292912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:53,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-26T10:33:53,045 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:53,046 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-26T10:33:53,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:33:53,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. as already flushing 2024-11-26T10:33:53,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:33:53,046 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:53,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:53,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:53,091 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/91dbf6950ccd4730a5ec2e2d0df045f2 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/91dbf6950ccd4730a5ec2e2d0df045f2 2024-11-26T10:33:53,096 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e4978131eaad54a00cbbf3f245fd971c/B of e4978131eaad54a00cbbf3f245fd971c into 91dbf6950ccd4730a5ec2e2d0df045f2(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:33:53,096 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e4978131eaad54a00cbbf3f245fd971c: 2024-11-26T10:33:53,096 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c., storeName=e4978131eaad54a00cbbf3f245fd971c/B, priority=13, startTime=1732617232611; duration=0sec 2024-11-26T10:33:53,096 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:33:53,096 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e4978131eaad54a00cbbf3f245fd971c:B 2024-11-26T10:33:53,115 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:53,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617293113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:53,115 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:53,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617293113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:53,117 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:53,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55276 deadline: 1732617293115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:53,117 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:53,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617293116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:53,118 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:53,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617293116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:53,199 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:53,199 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-26T10:33:53,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:33:53,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. as already flushing 2024-11-26T10:33:53,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:33:53,200 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:33:53,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:53,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:53,242 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=81, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/b0de51c241964eaa80ea35658051c73e 2024-11-26T10:33:53,253 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/11f13265fcf8471b9828912a8f6f27dd is 50, key is test_row_0/B:col10/1732617232471/Put/seqid=0 2024-11-26T10:33:53,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741991_1167 (size=12001) 2024-11-26T10:33:53,277 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=81 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/11f13265fcf8471b9828912a8f6f27dd 2024-11-26T10:33:53,288 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/bd71bbb1d6b64227860ab2e719f2deeb is 50, key is test_row_0/C:col10/1732617232471/Put/seqid=0 2024-11-26T10:33:53,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741992_1168 (size=12001) 2024-11-26T10:33:53,309 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=81 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/bd71bbb1d6b64227860ab2e719f2deeb 2024-11-26T10:33:53,314 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/b0de51c241964eaa80ea35658051c73e as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/b0de51c241964eaa80ea35658051c73e 2024-11-26T10:33:53,320 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/b0de51c241964eaa80ea35658051c73e, entries=150, sequenceid=81, filesize=30.2 K 2024-11-26T10:33:53,321 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/11f13265fcf8471b9828912a8f6f27dd as 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/11f13265fcf8471b9828912a8f6f27dd 2024-11-26T10:33:53,327 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/11f13265fcf8471b9828912a8f6f27dd, entries=150, sequenceid=81, filesize=11.7 K 2024-11-26T10:33:53,328 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/bd71bbb1d6b64227860ab2e719f2deeb as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/bd71bbb1d6b64227860ab2e719f2deeb 2024-11-26T10:33:53,335 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/bd71bbb1d6b64227860ab2e719f2deeb, entries=150, sequenceid=81, filesize=11.7 K 2024-11-26T10:33:53,337 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for e4978131eaad54a00cbbf3f245fd971c in 547ms, sequenceid=81, compaction requested=false 2024-11-26T10:33:53,337 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e4978131eaad54a00cbbf3f245fd971c: 2024-11-26T10:33:53,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-26T10:33:53,352 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:53,352 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-26T10:33:53,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 
2024-11-26T10:33:53,352 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2837): Flushing e4978131eaad54a00cbbf3f245fd971c 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-26T10:33:53,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e4978131eaad54a00cbbf3f245fd971c, store=A 2024-11-26T10:33:53,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:53,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e4978131eaad54a00cbbf3f245fd971c, store=B 2024-11-26T10:33:53,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:53,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e4978131eaad54a00cbbf3f245fd971c, store=C 2024-11-26T10:33:53,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:53,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126e4b042d1ee00420ebb034e2634a83b9c_e4978131eaad54a00cbbf3f245fd971c is 50, key is test_row_0/A:col10/1732617232804/Put/seqid=0 2024-11-26T10:33:53,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741993_1169 (size=12154) 2024-11-26T10:33:53,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:53,384 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126e4b042d1ee00420ebb034e2634a83b9c_e4978131eaad54a00cbbf3f245fd971c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126e4b042d1ee00420ebb034e2634a83b9c_e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:33:53,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/a6eff5e5ffe74f49a6869290558d4e5d, store: [table=TestAcidGuarantees family=A region=e4978131eaad54a00cbbf3f245fd971c] 2024-11-26T10:33:53,387 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/a6eff5e5ffe74f49a6869290558d4e5d is 175, key is test_row_0/A:col10/1732617232804/Put/seqid=0 2024-11-26T10:33:53,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741994_1170 (size=30955) 2024-11-26T10:33:53,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:33:53,421 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. as already flushing 2024-11-26T10:33:53,449 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:53,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617293445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:53,449 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:53,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617293446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:53,451 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:53,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617293447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:53,451 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:53,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617293447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:53,452 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:53,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55276 deadline: 1732617293449, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:53,552 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:53,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617293550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:53,552 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:53,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617293550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:53,553 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:53,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617293553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:53,553 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:53,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617293553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:53,555 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:53,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55276 deadline: 1732617293554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:53,755 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:53,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617293753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:53,755 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:53,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617293754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:53,756 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:53,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617293755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:53,756 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:53,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617293755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:53,759 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:53,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55276 deadline: 1732617293759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:53,801 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=94, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/a6eff5e5ffe74f49a6869290558d4e5d 2024-11-26T10:33:53,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/d187fe240026459b8c43c5bf1f2ffa13 is 50, key is test_row_0/B:col10/1732617232804/Put/seqid=0 2024-11-26T10:33:53,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741995_1171 (size=12001) 2024-11-26T10:33:53,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-26T10:33:54,058 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:54,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617294056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:54,058 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:54,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617294057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:54,059 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:54,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617294058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:54,061 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:54,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617294060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:54,062 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:54,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55276 deadline: 1732617294062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:54,216 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/d187fe240026459b8c43c5bf1f2ffa13 2024-11-26T10:33:54,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/102cfada9c794ef98c75215745293b37 is 50, key is test_row_0/C:col10/1732617232804/Put/seqid=0 2024-11-26T10:33:54,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741996_1172 (size=12001) 2024-11-26T10:33:54,563 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:54,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617294561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:54,564 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:54,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617294562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:54,564 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:54,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617294562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:54,567 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:54,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55276 deadline: 1732617294563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:54,568 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:54,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617294565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:54,629 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/102cfada9c794ef98c75215745293b37 2024-11-26T10:33:54,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/a6eff5e5ffe74f49a6869290558d4e5d as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/a6eff5e5ffe74f49a6869290558d4e5d 2024-11-26T10:33:54,642 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/a6eff5e5ffe74f49a6869290558d4e5d, entries=150, sequenceid=94, filesize=30.2 K 2024-11-26T10:33:54,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/d187fe240026459b8c43c5bf1f2ffa13 as 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/d187fe240026459b8c43c5bf1f2ffa13 2024-11-26T10:33:54,649 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/d187fe240026459b8c43c5bf1f2ffa13, entries=150, sequenceid=94, filesize=11.7 K 2024-11-26T10:33:54,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/102cfada9c794ef98c75215745293b37 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/102cfada9c794ef98c75215745293b37 2024-11-26T10:33:54,655 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/102cfada9c794ef98c75215745293b37, entries=150, sequenceid=94, filesize=11.7 K 2024-11-26T10:33:54,656 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for e4978131eaad54a00cbbf3f245fd971c in 1304ms, sequenceid=94, compaction requested=true 2024-11-26T10:33:54,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2538): Flush status journal for e4978131eaad54a00cbbf3f245fd971c: 2024-11-26T10:33:54,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 
2024-11-26T10:33:54,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-11-26T10:33:54,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4106): Remote procedure done, pid=52 2024-11-26T10:33:54,658 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=52, resume processing ppid=51 2024-11-26T10:33:54,658 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9170 sec 2024-11-26T10:33:54,660 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees in 1.9220 sec 2024-11-26T10:33:54,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-26T10:33:54,845 INFO [Thread-728 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 51 completed 2024-11-26T10:33:54,846 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-26T10:33:54,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees 2024-11-26T10:33:54,848 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-26T10:33:54,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-26T10:33:54,848 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-26T10:33:54,848 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-26T10:33:54,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-26T10:33:54,999 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:55,000 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-26T10:33:55,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 
2024-11-26T10:33:55,000 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2837): Flushing e4978131eaad54a00cbbf3f245fd971c 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-26T10:33:55,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e4978131eaad54a00cbbf3f245fd971c, store=A 2024-11-26T10:33:55,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:55,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e4978131eaad54a00cbbf3f245fd971c, store=B 2024-11-26T10:33:55,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:55,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e4978131eaad54a00cbbf3f245fd971c, store=C 2024-11-26T10:33:55,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:55,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126d9860ff85a8843e2a6abb1f21af3e1a9_e4978131eaad54a00cbbf3f245fd971c is 50, key is test_row_0/A:col10/1732617233444/Put/seqid=0 2024-11-26T10:33:55,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741997_1173 (size=12154) 2024-11-26T10:33:55,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-26T10:33:55,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:55,441 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126d9860ff85a8843e2a6abb1f21af3e1a9_e4978131eaad54a00cbbf3f245fd971c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126d9860ff85a8843e2a6abb1f21af3e1a9_e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:33:55,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/9aec94a5f6aa4946b283638bf9601143, store: [table=TestAcidGuarantees family=A region=e4978131eaad54a00cbbf3f245fd971c] 2024-11-26T10:33:55,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/9aec94a5f6aa4946b283638bf9601143 is 175, key is test_row_0/A:col10/1732617233444/Put/seqid=0 2024-11-26T10:33:55,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741998_1174 (size=30955) 2024-11-26T10:33:55,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-26T10:33:55,451 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=118, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/9aec94a5f6aa4946b283638bf9601143 2024-11-26T10:33:55,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/cc1fdaddd8f444ab9981038305ea2898 is 50, key is test_row_0/B:col10/1732617233444/Put/seqid=0 2024-11-26T10:33:55,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741999_1175 (size=12001) 2024-11-26T10:33:55,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:33:55,568 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. as already flushing 2024-11-26T10:33:55,578 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:55,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617295575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:55,578 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:55,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617295576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:55,579 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:55,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617295577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:55,579 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:55,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55276 deadline: 1732617295578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:55,580 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:55,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617295578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:55,680 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:55,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617295679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:55,681 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:55,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617295679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:55,681 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:55,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617295680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:55,681 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:55,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617295681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:55,872 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/cc1fdaddd8f444ab9981038305ea2898 2024-11-26T10:33:55,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/89de52412b874097acdd8f06d0dadc67 is 50, key is test_row_0/C:col10/1732617233444/Put/seqid=0 2024-11-26T10:33:55,882 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:55,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617295882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:55,884 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:55,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617295882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:55,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742000_1176 (size=12001) 2024-11-26T10:33:55,884 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:55,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617295882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:55,885 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:55,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617295883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:55,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-26T10:33:56,184 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:56,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617296184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:56,186 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:56,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617296185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:56,187 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:56,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617296186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:56,189 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:56,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617296187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:56,285 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/89de52412b874097acdd8f06d0dadc67 2024-11-26T10:33:56,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/9aec94a5f6aa4946b283638bf9601143 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/9aec94a5f6aa4946b283638bf9601143 2024-11-26T10:33:56,293 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/9aec94a5f6aa4946b283638bf9601143, entries=150, sequenceid=118, filesize=30.2 K 2024-11-26T10:33:56,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/cc1fdaddd8f444ab9981038305ea2898 as 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/cc1fdaddd8f444ab9981038305ea2898 2024-11-26T10:33:56,298 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/cc1fdaddd8f444ab9981038305ea2898, entries=150, sequenceid=118, filesize=11.7 K 2024-11-26T10:33:56,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/89de52412b874097acdd8f06d0dadc67 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/89de52412b874097acdd8f06d0dadc67 2024-11-26T10:33:56,303 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/89de52412b874097acdd8f06d0dadc67, entries=150, sequenceid=118, filesize=11.7 K 2024-11-26T10:33:56,304 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for e4978131eaad54a00cbbf3f245fd971c in 1304ms, sequenceid=118, compaction requested=true 2024-11-26T10:33:56,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2538): Flush status journal for e4978131eaad54a00cbbf3f245fd971c: 2024-11-26T10:33:56,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 
2024-11-26T10:33:56,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-11-26T10:33:56,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4106): Remote procedure done, pid=54 2024-11-26T10:33:56,306 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=53 2024-11-26T10:33:56,306 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4570 sec 2024-11-26T10:33:56,307 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees in 1.4600 sec 2024-11-26T10:33:56,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:33:56,691 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e4978131eaad54a00cbbf3f245fd971c 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-26T10:33:56,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e4978131eaad54a00cbbf3f245fd971c, store=A 2024-11-26T10:33:56,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:56,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e4978131eaad54a00cbbf3f245fd971c, store=B 2024-11-26T10:33:56,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:56,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e4978131eaad54a00cbbf3f245fd971c, store=C 2024-11-26T10:33:56,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:56,699 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126d685602c37f44399a243452efa8e288f_e4978131eaad54a00cbbf3f245fd971c is 50, key is test_row_0/A:col10/1732617236690/Put/seqid=0 2024-11-26T10:33:56,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742001_1177 (size=14744) 2024-11-26T10:33:56,706 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:56,713 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126d685602c37f44399a243452efa8e288f_e4978131eaad54a00cbbf3f245fd971c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126d685602c37f44399a243452efa8e288f_e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:33:56,714 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/148fffba1c924bc9b96ab17330fb353b, store: [table=TestAcidGuarantees family=A region=e4978131eaad54a00cbbf3f245fd971c] 2024-11-26T10:33:56,714 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/148fffba1c924bc9b96ab17330fb353b is 175, key is test_row_0/A:col10/1732617236690/Put/seqid=0 2024-11-26T10:33:56,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742002_1178 (size=39699) 2024-11-26T10:33:56,723 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:56,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617296717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:56,723 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:56,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617296717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:56,724 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:56,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617296718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:56,725 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:56,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617296719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:56,826 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:56,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617296824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:56,827 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:56,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617296824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:56,827 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:56,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617296824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:56,828 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:56,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617296826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:56,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-26T10:33:56,953 INFO [Thread-728 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 53 completed 2024-11-26T10:33:56,954 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-26T10:33:56,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees 2024-11-26T10:33:56,955 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-26T10:33:56,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-26T10:33:56,956 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-26T10:33:56,956 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=56, ppid=55, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-26T10:33:57,032 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:57,032 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:57,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617297028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:57,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617297028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:57,032 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:57,032 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:57,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617297029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:57,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617297030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:57,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-26T10:33:57,107 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:57,108 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-26T10:33:57,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:33:57,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. as already flushing 2024-11-26T10:33:57,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:33:57,108 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:57,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:57,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:57,125 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=133, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/148fffba1c924bc9b96ab17330fb353b 2024-11-26T10:33:57,132 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/7d02dec060ba48fd8bf1a6088c5ffe62 is 50, key is test_row_0/B:col10/1732617236690/Put/seqid=0 2024-11-26T10:33:57,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742003_1179 (size=12151) 2024-11-26T10:33:57,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-26T10:33:57,259 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:57,259 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-26T10:33:57,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:33:57,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. as already flushing 2024-11-26T10:33:57,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:33:57,260 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:57,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:57,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:57,333 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:57,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617297333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:57,335 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:57,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617297334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:57,336 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:57,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617297334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:57,336 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:57,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617297335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:57,411 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:57,412 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-26T10:33:57,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:33:57,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. as already flushing 2024-11-26T10:33:57,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:33:57,412 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:33:57,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:57,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:57,537 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/7d02dec060ba48fd8bf1a6088c5ffe62 2024-11-26T10:33:57,544 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/b3d146cd81d944a090627b0e5bb0eaab is 50, key is test_row_0/C:col10/1732617236690/Put/seqid=0 2024-11-26T10:33:57,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742004_1180 (size=12151) 2024-11-26T10:33:57,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-26T10:33:57,564 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:57,564 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-26T10:33:57,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:33:57,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. as already flushing 2024-11-26T10:33:57,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:33:57,565 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:57,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:57,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:57,582 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:57,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55276 deadline: 1732617297580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:57,583 DEBUG [Thread-726 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4134 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c., 
hostname=ccf62758a0a5,45419,1732617185877, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at 
org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-26T10:33:57,716 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:57,717 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-26T10:33:57,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:33:57,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. as already flushing 2024-11-26T10:33:57,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:33:57,717 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:57,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:57,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:57,836 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:57,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617297835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:57,838 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:57,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617297837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:57,839 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:57,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617297838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:57,842 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:57,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617297839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:57,869 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:57,870 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-26T10:33:57,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:33:57,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. as already flushing 2024-11-26T10:33:57,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:33:57,870 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:57,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:57,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:33:57,954 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/b3d146cd81d944a090627b0e5bb0eaab 2024-11-26T10:33:57,959 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/148fffba1c924bc9b96ab17330fb353b as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/148fffba1c924bc9b96ab17330fb353b 2024-11-26T10:33:57,963 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/148fffba1c924bc9b96ab17330fb353b, entries=200, sequenceid=133, filesize=38.8 K 2024-11-26T10:33:57,964 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/7d02dec060ba48fd8bf1a6088c5ffe62 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/7d02dec060ba48fd8bf1a6088c5ffe62 2024-11-26T10:33:57,969 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/7d02dec060ba48fd8bf1a6088c5ffe62, entries=150, sequenceid=133, filesize=11.9 K 2024-11-26T10:33:57,970 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/b3d146cd81d944a090627b0e5bb0eaab as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/b3d146cd81d944a090627b0e5bb0eaab 2024-11-26T10:33:57,975 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/b3d146cd81d944a090627b0e5bb0eaab, entries=150, sequenceid=133, filesize=11.9 K 2024-11-26T10:33:57,976 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for e4978131eaad54a00cbbf3f245fd971c in 1285ms, sequenceid=133, compaction requested=true 2024-11-26T10:33:57,976 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e4978131eaad54a00cbbf3f245fd971c: 2024-11-26T10:33:57,976 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e4978131eaad54a00cbbf3f245fd971c:A, priority=-2147483648, current under compaction store size is 1 2024-11-26T10:33:57,976 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:33:57,976 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-26T10:33:57,976 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e4978131eaad54a00cbbf3f245fd971c:B, priority=-2147483648, current under compaction store size is 2 2024-11-26T10:33:57,976 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:33:57,976 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-26T10:33:57,976 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e4978131eaad54a00cbbf3f245fd971c:C, priority=-2147483648, current under compaction store size is 3 2024-11-26T10:33:57,976 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:33:57,978 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 60258 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-26T10:33:57,979 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): e4978131eaad54a00cbbf3f245fd971c/B is initiating minor compaction (all files) 2024-11-26T10:33:57,979 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e4978131eaad54a00cbbf3f245fd971c/B in TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 
2024-11-26T10:33:57,979 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/91dbf6950ccd4730a5ec2e2d0df045f2, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/11f13265fcf8471b9828912a8f6f27dd, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/d187fe240026459b8c43c5bf1f2ffa13, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/cc1fdaddd8f444ab9981038305ea2898, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/7d02dec060ba48fd8bf1a6088c5ffe62] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp, totalSize=58.8 K 2024-11-26T10:33:57,979 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 163622 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-26T10:33:57,979 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): e4978131eaad54a00cbbf3f245fd971c/A is initiating minor compaction (all files) 2024-11-26T10:33:57,979 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 91dbf6950ccd4730a5ec2e2d0df045f2, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732617231839 2024-11-26T10:33:57,979 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e4978131eaad54a00cbbf3f245fd971c/A in TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 
2024-11-26T10:33:57,980 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/80002b85d97c4cd184681b8bbf1d2652, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/b0de51c241964eaa80ea35658051c73e, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/a6eff5e5ffe74f49a6869290558d4e5d, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/9aec94a5f6aa4946b283638bf9601143, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/148fffba1c924bc9b96ab17330fb353b] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp, totalSize=159.8 K 2024-11-26T10:33:57,980 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=11 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:33:57,980 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 
files: [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/80002b85d97c4cd184681b8bbf1d2652, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/b0de51c241964eaa80ea35658051c73e, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/a6eff5e5ffe74f49a6869290558d4e5d, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/9aec94a5f6aa4946b283638bf9601143, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/148fffba1c924bc9b96ab17330fb353b] 2024-11-26T10:33:57,980 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 11f13265fcf8471b9828912a8f6f27dd, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1732617232471 2024-11-26T10:33:57,980 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting d187fe240026459b8c43c5bf1f2ffa13, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732617232801 2024-11-26T10:33:57,980 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 80002b85d97c4cd184681b8bbf1d2652, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732617231839 2024-11-26T10:33:57,980 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting cc1fdaddd8f444ab9981038305ea2898, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732617233444 2024-11-26T10:33:57,981 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting b0de51c241964eaa80ea35658051c73e, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1732617232471 2024-11-26T10:33:57,981 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 7d02dec060ba48fd8bf1a6088c5ffe62, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732617236690 2024-11-26T10:33:57,981 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting a6eff5e5ffe74f49a6869290558d4e5d, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732617232801 2024-11-26T10:33:57,981 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9aec94a5f6aa4946b283638bf9601143, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732617233444 2024-11-26T10:33:57,981 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 148fffba1c924bc9b96ab17330fb353b, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732617235575 2024-11-26T10:33:57,992 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true 
store=[table=TestAcidGuarantees family=A region=e4978131eaad54a00cbbf3f245fd971c] 2024-11-26T10:33:57,992 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e4978131eaad54a00cbbf3f245fd971c#B#compaction#155 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:33:57,993 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/281bcd856f554b7f8252aee4fb1728bd is 50, key is test_row_0/B:col10/1732617236690/Put/seqid=0 2024-11-26T10:33:57,995 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241126c0df7ec68519494c9cdda62d5ae739b6_e4978131eaad54a00cbbf3f245fd971c store=[table=TestAcidGuarantees family=A region=e4978131eaad54a00cbbf3f245fd971c] 2024-11-26T10:33:58,001 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241126c0df7ec68519494c9cdda62d5ae739b6_e4978131eaad54a00cbbf3f245fd971c, store=[table=TestAcidGuarantees family=A region=e4978131eaad54a00cbbf3f245fd971c] 2024-11-26T10:33:58,002 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126c0df7ec68519494c9cdda62d5ae739b6_e4978131eaad54a00cbbf3f245fd971c because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=e4978131eaad54a00cbbf3f245fd971c] 2024-11-26T10:33:58,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742005_1181 (size=12425) 2024-11-26T10:33:58,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742006_1182 (size=4469) 2024-11-26T10:33:58,022 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:58,022 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-26T10:33:58,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 
2024-11-26T10:33:58,023 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2837): Flushing e4978131eaad54a00cbbf3f245fd971c 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-26T10:33:58,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e4978131eaad54a00cbbf3f245fd971c, store=A 2024-11-26T10:33:58,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:58,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e4978131eaad54a00cbbf3f245fd971c, store=B 2024-11-26T10:33:58,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:58,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e4978131eaad54a00cbbf3f245fd971c, store=C 2024-11-26T10:33:58,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:58,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411269c54f931833240039bbabfb246dc2553_e4978131eaad54a00cbbf3f245fd971c is 50, key is test_row_0/A:col10/1732617236710/Put/seqid=0 2024-11-26T10:33:58,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742007_1183 (size=12304) 2024-11-26T10:33:58,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-26T10:33:58,413 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/281bcd856f554b7f8252aee4fb1728bd as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/281bcd856f554b7f8252aee4fb1728bd 2024-11-26T10:33:58,419 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in e4978131eaad54a00cbbf3f245fd971c/B of e4978131eaad54a00cbbf3f245fd971c into 281bcd856f554b7f8252aee4fb1728bd(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-26T10:33:58,419 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e4978131eaad54a00cbbf3f245fd971c: 2024-11-26T10:33:58,419 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c., storeName=e4978131eaad54a00cbbf3f245fd971c/B, priority=11, startTime=1732617237976; duration=0sec 2024-11-26T10:33:58,419 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:33:58,419 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e4978131eaad54a00cbbf3f245fd971c:B 2024-11-26T10:33:58,419 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-26T10:33:58,421 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 60258 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-26T10:33:58,421 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): e4978131eaad54a00cbbf3f245fd971c/C is initiating minor compaction (all files) 2024-11-26T10:33:58,421 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e4978131eaad54a00cbbf3f245fd971c#A#compaction#156 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:33:58,421 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e4978131eaad54a00cbbf3f245fd971c/C in TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 
2024-11-26T10:33:58,421 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/3ea95a9d322e410cb4d1d9c9f673f952, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/bd71bbb1d6b64227860ab2e719f2deeb, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/102cfada9c794ef98c75215745293b37, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/89de52412b874097acdd8f06d0dadc67, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/b3d146cd81d944a090627b0e5bb0eaab] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp, totalSize=58.8 K 2024-11-26T10:33:58,422 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/cf5d108abd0c44f1b4a3544fe25d5d84 is 175, key is test_row_0/A:col10/1732617236690/Put/seqid=0 2024-11-26T10:33:58,422 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 3ea95a9d322e410cb4d1d9c9f673f952, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732617231839 2024-11-26T10:33:58,422 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting bd71bbb1d6b64227860ab2e719f2deeb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1732617232471 2024-11-26T10:33:58,422 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 102cfada9c794ef98c75215745293b37, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732617232801 2024-11-26T10:33:58,423 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 89de52412b874097acdd8f06d0dadc67, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732617233444 2024-11-26T10:33:58,423 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting b3d146cd81d944a090627b0e5bb0eaab, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732617236690 2024-11-26T10:33:58,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742008_1184 (size=31379) 2024-11-26T10:33:58,434 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e4978131eaad54a00cbbf3f245fd971c#C#compaction#158 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:33:58,435 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/15e7c9e17e704540bc7e70ff580f31ef is 50, key is test_row_0/C:col10/1732617236690/Put/seqid=0 2024-11-26T10:33:58,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:58,442 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411269c54f931833240039bbabfb246dc2553_e4978131eaad54a00cbbf3f245fd971c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411269c54f931833240039bbabfb246dc2553_e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:33:58,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/2a982c27b305410083d2eefd5b532152, store: [table=TestAcidGuarantees family=A region=e4978131eaad54a00cbbf3f245fd971c] 2024-11-26T10:33:58,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/2a982c27b305410083d2eefd5b532152 is 175, key is test_row_0/A:col10/1732617236710/Put/seqid=0 2024-11-26T10:33:58,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742009_1185 (size=12425) 2024-11-26T10:33:58,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742010_1186 (size=31105) 2024-11-26T10:33:58,449 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/15e7c9e17e704540bc7e70ff580f31ef as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/15e7c9e17e704540bc7e70ff580f31ef 2024-11-26T10:33:58,453 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in e4978131eaad54a00cbbf3f245fd971c/C of e4978131eaad54a00cbbf3f245fd971c into 15e7c9e17e704540bc7e70ff580f31ef(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-26T10:33:58,453 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e4978131eaad54a00cbbf3f245fd971c: 2024-11-26T10:33:58,454 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c., storeName=e4978131eaad54a00cbbf3f245fd971c/C, priority=11, startTime=1732617237976; duration=0sec 2024-11-26T10:33:58,454 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:33:58,454 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e4978131eaad54a00cbbf3f245fd971c:C 2024-11-26T10:33:58,831 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/cf5d108abd0c44f1b4a3544fe25d5d84 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/cf5d108abd0c44f1b4a3544fe25d5d84 2024-11-26T10:33:58,836 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in e4978131eaad54a00cbbf3f245fd971c/A of e4978131eaad54a00cbbf3f245fd971c into cf5d108abd0c44f1b4a3544fe25d5d84(size=30.6 K), total size for store is 30.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:33:58,836 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e4978131eaad54a00cbbf3f245fd971c: 2024-11-26T10:33:58,836 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c., storeName=e4978131eaad54a00cbbf3f245fd971c/A, priority=11, startTime=1732617237976; duration=0sec 2024-11-26T10:33:58,836 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:33:58,836 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e4978131eaad54a00cbbf3f245fd971c:A 2024-11-26T10:33:58,845 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 
as already flushing
2024-11-26T10:33:58,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on e4978131eaad54a00cbbf3f245fd971c
2024-11-26T10:33:58,848 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=154, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/2a982c27b305410083d2eefd5b532152
2024-11-26T10:33:58,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/55d677eb7121427cb7b9e86e94c8bc5a is 50, key is test_row_0/B:col10/1732617236710/Put/seqid=0
2024-11-26T10:33:58,858 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-26T10:33:58,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617298857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877
2024-11-26T10:33:58,859 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:58,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617298857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:58,859 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:58,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617298858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:58,861 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:58,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617298858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:58,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742011_1187 (size=12151) 2024-11-26T10:33:58,961 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:58,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617298960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:58,961 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:58,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617298960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:58,962 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:58,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617298960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:58,964 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:58,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617298962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:59,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-26T10:33:59,163 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:59,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617299162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:59,164 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:59,164 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:59,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617299162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:59,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617299164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:59,166 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:59,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617299165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:59,265 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/55d677eb7121427cb7b9e86e94c8bc5a 2024-11-26T10:33:59,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/84929dc17505488a897f3d539a00a90a is 50, key is test_row_0/C:col10/1732617236710/Put/seqid=0 2024-11-26T10:33:59,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742012_1188 (size=12151) 2024-11-26T10:33:59,466 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:59,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617299465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:59,466 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:59,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617299466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:59,468 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:59,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617299467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:59,469 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:33:59,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617299468, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:33:59,677 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/84929dc17505488a897f3d539a00a90a 2024-11-26T10:33:59,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/2a982c27b305410083d2eefd5b532152 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/2a982c27b305410083d2eefd5b532152 2024-11-26T10:33:59,687 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/2a982c27b305410083d2eefd5b532152, entries=150, sequenceid=154, filesize=30.4 K 2024-11-26T10:33:59,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/55d677eb7121427cb7b9e86e94c8bc5a as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/55d677eb7121427cb7b9e86e94c8bc5a 2024-11-26T10:33:59,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,693 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/55d677eb7121427cb7b9e86e94c8bc5a, entries=150, sequenceid=154, filesize=11.9 K 2024-11-26T10:33:59,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/84929dc17505488a897f3d539a00a90a as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/84929dc17505488a897f3d539a00a90a 2024-11-26T10:33:59,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-26T10:33:59,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,699 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/84929dc17505488a897f3d539a00a90a, entries=150, sequenceid=154, filesize=11.9 K 2024-11-26T10:33:59,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,700 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for e4978131eaad54a00cbbf3f245fd971c in 1676ms, sequenceid=154, compaction requested=false 2024-11-26T10:33:59,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2538): Flush status journal for e4978131eaad54a00cbbf3f245fd971c: 2024-11-26T10:33:59,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 
2024-11-26T10:33:59,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=56 2024-11-26T10:33:59,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4106): Remote procedure done, pid=56 2024-11-26T10:33:59,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,702 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=56, resume processing ppid=55 2024-11-26T10:33:59,702 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, ppid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.7450 sec 2024-11-26T10:33:59,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,702 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,704 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees in 2.7480 sec 2024-11-26T10:33:59,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,706 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,708 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,711 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,715 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,718 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,722 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,725 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,728 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,745 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,748 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,752 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,756 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,759 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,780 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,783 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,786 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,789 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,793 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,798 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,808 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,812 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,816 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,820 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,824 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,827 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,829 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,833 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,916 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,919 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,921 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,925 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,929 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,933 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,935 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,938 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,940 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,945 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,948 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,951 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,953 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,955 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,959 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,962 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,965 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,967 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,970 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,973 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:33:59,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,976 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e4978131eaad54a00cbbf3f245fd971c 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-26T10:33:59,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,977 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e4978131eaad54a00cbbf3f245fd971c, store=A 2024-11-26T10:33:59,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,977 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:59,977 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e4978131eaad54a00cbbf3f245fd971c, store=B 2024-11-26T10:33:59,977 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:59,977 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
e4978131eaad54a00cbbf3f245fd971c, store=C 2024-11-26T10:33:59,977 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:33:59,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,985 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126a67ae838f1954b13ad90d6642e2aac39_e4978131eaad54a00cbbf3f245fd971c is 50, key is test_row_0/A:col10/1732617239975/Put/seqid=0 2024-11-26T10:33:59,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:33:59,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:34:00,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742013_1189 (size=14794) 2024-11-26T10:34:00,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:34:00,005 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:00,005 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:00,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617299999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:00,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617299998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:00,005 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:00,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617300000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:00,007 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:00,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617300005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:00,106 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:00,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617300106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:00,108 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:00,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617300107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:00,109 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:00,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617300107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:00,112 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:00,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617300108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:00,310 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:00,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617300309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:00,311 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:00,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617300310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:00,312 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:00,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617300310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:00,314 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:00,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617300314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:00,403 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:34:00,406 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126a67ae838f1954b13ad90d6642e2aac39_e4978131eaad54a00cbbf3f245fd971c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126a67ae838f1954b13ad90d6642e2aac39_e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:34:00,407 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/ee83770f41a949ce91620ef303ba209a, store: [table=TestAcidGuarantees family=A region=e4978131eaad54a00cbbf3f245fd971c] 2024-11-26T10:34:00,408 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/ee83770f41a949ce91620ef303ba209a is 175, key is test_row_0/A:col10/1732617239975/Put/seqid=0 2024-11-26T10:34:00,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742014_1190 (size=39749) 2024-11-26T10:34:00,413 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=174, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/ee83770f41a949ce91620ef303ba209a 2024-11-26T10:34:00,421 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/ec002635141e4cdba0e42ac5acf7054f is 50, key is test_row_0/B:col10/1732617239975/Put/seqid=0 2024-11-26T10:34:00,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742015_1191 (size=12151) 2024-11-26T10:34:00,433 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/ec002635141e4cdba0e42ac5acf7054f 2024-11-26T10:34:00,440 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/7afd49fbcac342ee953c88f889e6db9c is 50, key is test_row_0/C:col10/1732617239975/Put/seqid=0 2024-11-26T10:34:00,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742016_1192 (size=12151) 2024-11-26T10:34:00,612 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:00,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617300611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:00,615 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:00,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617300613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:00,616 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:00,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617300614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:00,616 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:00,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617300615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:00,849 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/7afd49fbcac342ee953c88f889e6db9c 2024-11-26T10:34:00,878 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/ee83770f41a949ce91620ef303ba209a as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/ee83770f41a949ce91620ef303ba209a 2024-11-26T10:34:00,882 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/ee83770f41a949ce91620ef303ba209a, entries=200, sequenceid=174, filesize=38.8 K 2024-11-26T10:34:00,883 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/ec002635141e4cdba0e42ac5acf7054f as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/ec002635141e4cdba0e42ac5acf7054f 2024-11-26T10:34:00,887 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/ec002635141e4cdba0e42ac5acf7054f, entries=150, sequenceid=174, filesize=11.9 K 2024-11-26T10:34:00,888 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/7afd49fbcac342ee953c88f889e6db9c as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/7afd49fbcac342ee953c88f889e6db9c 2024-11-26T10:34:00,892 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/7afd49fbcac342ee953c88f889e6db9c, entries=150, sequenceid=174, filesize=11.9 K 2024-11-26T10:34:00,893 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for e4978131eaad54a00cbbf3f245fd971c in 917ms, sequenceid=174, compaction requested=true 2024-11-26T10:34:00,893 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e4978131eaad54a00cbbf3f245fd971c: 2024-11-26T10:34:00,893 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e4978131eaad54a00cbbf3f245fd971c:A, priority=-2147483648, current under compaction store size is 1 2024-11-26T10:34:00,893 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:34:00,893 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:34:00,894 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e4978131eaad54a00cbbf3f245fd971c:B, priority=-2147483648, current under compaction store size is 2 2024-11-26T10:34:00,894 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:34:00,894 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:34:00,894 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e4978131eaad54a00cbbf3f245fd971c:C, priority=-2147483648, current under compaction store size is 3 2024-11-26T10:34:00,894 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:34:00,894 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102233 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:34:00,894 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36727 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:34:00,895 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): e4978131eaad54a00cbbf3f245fd971c/A is initiating minor compaction (all files) 2024-11-26T10:34:00,895 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): e4978131eaad54a00cbbf3f245fd971c/B is initiating minor compaction (all files) 2024-11-26T10:34:00,895 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e4978131eaad54a00cbbf3f245fd971c/A in TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 
2024-11-26T10:34:00,895 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e4978131eaad54a00cbbf3f245fd971c/B in TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:00,895 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/281bcd856f554b7f8252aee4fb1728bd, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/55d677eb7121427cb7b9e86e94c8bc5a, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/ec002635141e4cdba0e42ac5acf7054f] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp, totalSize=35.9 K 2024-11-26T10:34:00,895 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/cf5d108abd0c44f1b4a3544fe25d5d84, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/2a982c27b305410083d2eefd5b532152, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/ee83770f41a949ce91620ef303ba209a] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp, totalSize=99.8 K 2024-11-26T10:34:00,895 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:00,895 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 
files: [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/cf5d108abd0c44f1b4a3544fe25d5d84, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/2a982c27b305410083d2eefd5b532152, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/ee83770f41a949ce91620ef303ba209a] 2024-11-26T10:34:00,895 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 281bcd856f554b7f8252aee4fb1728bd, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732617236690 2024-11-26T10:34:00,895 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting cf5d108abd0c44f1b4a3544fe25d5d84, keycount=150, bloomtype=ROW, size=30.6 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732617236690 2024-11-26T10:34:00,896 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 55d677eb7121427cb7b9e86e94c8bc5a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1732617236710 2024-11-26T10:34:00,896 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2a982c27b305410083d2eefd5b532152, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1732617236710 2024-11-26T10:34:00,896 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting ec002635141e4cdba0e42ac5acf7054f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732617238857 2024-11-26T10:34:00,896 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting ee83770f41a949ce91620ef303ba209a, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732617238856 2024-11-26T10:34:00,902 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=e4978131eaad54a00cbbf3f245fd971c] 2024-11-26T10:34:00,903 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e4978131eaad54a00cbbf3f245fd971c#B#compaction#164 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:34:00,904 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/930d14809acf4053b8a110f2292861a4 is 50, key is test_row_0/B:col10/1732617239975/Put/seqid=0 2024-11-26T10:34:00,907 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241126e4fe7b797ee848e68f50415e342a234e_e4978131eaad54a00cbbf3f245fd971c store=[table=TestAcidGuarantees family=A region=e4978131eaad54a00cbbf3f245fd971c] 2024-11-26T10:34:00,909 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241126e4fe7b797ee848e68f50415e342a234e_e4978131eaad54a00cbbf3f245fd971c, store=[table=TestAcidGuarantees family=A region=e4978131eaad54a00cbbf3f245fd971c] 2024-11-26T10:34:00,909 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126e4fe7b797ee848e68f50415e342a234e_e4978131eaad54a00cbbf3f245fd971c because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=e4978131eaad54a00cbbf3f245fd971c] 2024-11-26T10:34:00,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742017_1193 (size=12527) 2024-11-26T10:34:00,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742018_1194 (size=4469) 2024-11-26T10:34:00,945 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e4978131eaad54a00cbbf3f245fd971c#A#compaction#165 average throughput is 0.57 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:34:00,946 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/9251504322d348e48b5a18c2eb8cf628 is 175, key is test_row_0/A:col10/1732617239975/Put/seqid=0 2024-11-26T10:34:00,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742019_1195 (size=31481) 2024-11-26T10:34:00,960 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/9251504322d348e48b5a18c2eb8cf628 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/9251504322d348e48b5a18c2eb8cf628 2024-11-26T10:34:00,967 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e4978131eaad54a00cbbf3f245fd971c/A of e4978131eaad54a00cbbf3f245fd971c into 9251504322d348e48b5a18c2eb8cf628(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:34:00,967 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e4978131eaad54a00cbbf3f245fd971c: 2024-11-26T10:34:00,967 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c., storeName=e4978131eaad54a00cbbf3f245fd971c/A, priority=13, startTime=1732617240893; duration=0sec 2024-11-26T10:34:00,967 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:34:00,967 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e4978131eaad54a00cbbf3f245fd971c:A 2024-11-26T10:34:00,967 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:34:00,969 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36727 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:34:00,969 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): e4978131eaad54a00cbbf3f245fd971c/C is initiating minor compaction (all files) 2024-11-26T10:34:00,969 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e4978131eaad54a00cbbf3f245fd971c/C in TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 
2024-11-26T10:34:00,969 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/15e7c9e17e704540bc7e70ff580f31ef, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/84929dc17505488a897f3d539a00a90a, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/7afd49fbcac342ee953c88f889e6db9c] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp, totalSize=35.9 K 2024-11-26T10:34:00,970 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 15e7c9e17e704540bc7e70ff580f31ef, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732617236690 2024-11-26T10:34:00,970 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 84929dc17505488a897f3d539a00a90a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1732617236710 2024-11-26T10:34:00,970 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7afd49fbcac342ee953c88f889e6db9c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732617238857 2024-11-26T10:34:00,979 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e4978131eaad54a00cbbf3f245fd971c#C#compaction#166 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:34:00,980 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/2622a5ce74aa45318e235372ae636169 is 50, key is test_row_0/C:col10/1732617239975/Put/seqid=0 2024-11-26T10:34:00,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742020_1196 (size=12527) 2024-11-26T10:34:01,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-26T10:34:01,060 INFO [Thread-728 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 55 completed 2024-11-26T10:34:01,061 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-26T10:34:01,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees 2024-11-26T10:34:01,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-26T10:34:01,062 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-26T10:34:01,063 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-26T10:34:01,063 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-26T10:34:01,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:34:01,120 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e4978131eaad54a00cbbf3f245fd971c 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-26T10:34:01,120 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e4978131eaad54a00cbbf3f245fd971c, store=A 2024-11-26T10:34:01,121 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:01,121 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e4978131eaad54a00cbbf3f245fd971c, store=B 2024-11-26T10:34:01,121 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:01,121 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e4978131eaad54a00cbbf3f245fd971c, store=C 2024-11-26T10:34:01,121 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-11-26T10:34:01,127 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112642dad63491f84c28b42d16882969e2e4_e4978131eaad54a00cbbf3f245fd971c is 50, key is test_row_0/A:col10/1732617241120/Put/seqid=0 2024-11-26T10:34:01,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742021_1197 (size=17284) 2024-11-26T10:34:01,132 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:34:01,136 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:01,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617301133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:01,138 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:01,138 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:01,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617301134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:01,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617301135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:01,138 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112642dad63491f84c28b42d16882969e2e4_e4978131eaad54a00cbbf3f245fd971c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112642dad63491f84c28b42d16882969e2e4_e4978131eaad54a00cbbf3f245fd971c 
2024-11-26T10:34:01,138 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:01,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617301135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:01,139 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/71deba5982734d81b7c6d2a5d6c2c097, store: [table=TestAcidGuarantees family=A region=e4978131eaad54a00cbbf3f245fd971c] 2024-11-26T10:34:01,140 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/71deba5982734d81b7c6d2a5d6c2c097 is 175, key is test_row_0/A:col10/1732617241120/Put/seqid=0 2024-11-26T10:34:01,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742022_1198 (size=48389) 2024-11-26T10:34:01,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-26T10:34:01,214 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:01,214 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-26T10:34:01,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:01,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. as already flushing 2024-11-26T10:34:01,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:01,215 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:01,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:34:01,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:01,239 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:01,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617301237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:01,241 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:01,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617301239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:01,241 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:01,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617301239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:01,241 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:01,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617301239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:01,335 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/930d14809acf4053b8a110f2292861a4 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/930d14809acf4053b8a110f2292861a4 2024-11-26T10:34:01,340 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e4978131eaad54a00cbbf3f245fd971c/B of e4978131eaad54a00cbbf3f245fd971c into 930d14809acf4053b8a110f2292861a4(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:34:01,340 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e4978131eaad54a00cbbf3f245fd971c: 2024-11-26T10:34:01,340 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c., storeName=e4978131eaad54a00cbbf3f245fd971c/B, priority=13, startTime=1732617240893; duration=0sec 2024-11-26T10:34:01,340 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:34:01,340 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e4978131eaad54a00cbbf3f245fd971c:B 2024-11-26T10:34:01,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-26T10:34:01,366 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:01,367 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-26T10:34:01,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 
2024-11-26T10:34:01,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. as already flushing 2024-11-26T10:34:01,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:01,367 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:01,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:34:01,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:01,390 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/2622a5ce74aa45318e235372ae636169 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/2622a5ce74aa45318e235372ae636169 2024-11-26T10:34:01,395 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e4978131eaad54a00cbbf3f245fd971c/C of e4978131eaad54a00cbbf3f245fd971c into 2622a5ce74aa45318e235372ae636169(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-26T10:34:01,396 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e4978131eaad54a00cbbf3f245fd971c: 2024-11-26T10:34:01,396 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c., storeName=e4978131eaad54a00cbbf3f245fd971c/C, priority=13, startTime=1732617240894; duration=0sec 2024-11-26T10:34:01,396 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:34:01,396 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e4978131eaad54a00cbbf3f245fd971c:C 2024-11-26T10:34:01,443 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:01,443 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:01,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617301443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:01,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617301440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:01,443 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:01,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617301443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:01,444 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:01,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617301443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:01,519 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:01,520 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-26T10:34:01,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:01,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. as already flushing 2024-11-26T10:34:01,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:01,520 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:01,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:01,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:01,553 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=196, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/71deba5982734d81b7c6d2a5d6c2c097 2024-11-26T10:34:01,562 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/91d5f5bfcf3f4cf484c3a589cb3a04b9 is 50, key is test_row_0/B:col10/1732617241120/Put/seqid=0 2024-11-26T10:34:01,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742023_1199 (size=12151) 2024-11-26T10:34:01,599 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:01,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55276 deadline: 1732617301597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:01,600 DEBUG [Thread-726 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8151 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c., hostname=ccf62758a0a5,45419,1732617185877, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-26T10:34:01,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-26T10:34:01,672 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:01,672 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-26T10:34:01,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 
{event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:01,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. as already flushing 2024-11-26T10:34:01,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:01,673 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:01,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:34:01,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:01,746 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:01,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617301745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:01,746 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:01,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617301745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:01,749 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:01,749 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:01,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617301747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:01,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617301747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:01,824 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:01,825 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-26T10:34:01,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:01,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. as already flushing 2024-11-26T10:34:01,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:01,825 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:01,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:01,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:01,966 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/91d5f5bfcf3f4cf484c3a589cb3a04b9 2024-11-26T10:34:01,974 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/38731f42dff248e28c1b4100d66e4c54 is 50, key is test_row_0/C:col10/1732617241120/Put/seqid=0 2024-11-26T10:34:01,977 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:01,977 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-26T10:34:01,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:01,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. as already flushing 2024-11-26T10:34:01,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:01,977 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:01,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:01,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:01,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742024_1200 (size=12151) 2024-11-26T10:34:02,129 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:02,130 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-26T10:34:02,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:02,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. as already flushing 2024-11-26T10:34:02,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:02,130 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:34:02,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:02,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:02,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-26T10:34:02,252 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:02,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617302250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:02,252 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:02,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617302250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:02,255 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:02,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617302253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:02,255 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:02,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617302254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:02,282 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:02,283 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-26T10:34:02,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:02,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. as already flushing 2024-11-26T10:34:02,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:02,283 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:02,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:02,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:02,380 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/38731f42dff248e28c1b4100d66e4c54 2024-11-26T10:34:02,384 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/71deba5982734d81b7c6d2a5d6c2c097 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/71deba5982734d81b7c6d2a5d6c2c097 2024-11-26T10:34:02,389 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/71deba5982734d81b7c6d2a5d6c2c097, entries=250, sequenceid=196, filesize=47.3 K 2024-11-26T10:34:02,390 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/91d5f5bfcf3f4cf484c3a589cb3a04b9 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/91d5f5bfcf3f4cf484c3a589cb3a04b9 2024-11-26T10:34:02,393 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/91d5f5bfcf3f4cf484c3a589cb3a04b9, entries=150, sequenceid=196, filesize=11.9 K 2024-11-26T10:34:02,394 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/38731f42dff248e28c1b4100d66e4c54 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/38731f42dff248e28c1b4100d66e4c54 2024-11-26T10:34:02,398 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/38731f42dff248e28c1b4100d66e4c54, entries=150, sequenceid=196, filesize=11.9 K 2024-11-26T10:34:02,414 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=87.22 KB/89310 for e4978131eaad54a00cbbf3f245fd971c in 1293ms, sequenceid=196, compaction requested=false 2024-11-26T10:34:02,414 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e4978131eaad54a00cbbf3f245fd971c: 2024-11-26T10:34:02,435 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:02,435 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-26T10:34:02,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:02,436 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2837): Flushing e4978131eaad54a00cbbf3f245fd971c 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-26T10:34:02,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e4978131eaad54a00cbbf3f245fd971c, store=A 2024-11-26T10:34:02,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:02,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e4978131eaad54a00cbbf3f245fd971c, store=B 2024-11-26T10:34:02,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:02,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e4978131eaad54a00cbbf3f245fd971c, store=C 2024-11-26T10:34:02,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:02,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411266969b1fa784148cfae5124d072d059e3_e4978131eaad54a00cbbf3f245fd971c is 50, key is test_row_0/A:col10/1732617241134/Put/seqid=0 2024-11-26T10:34:02,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742025_1201 (size=12304) 2024-11-26T10:34:02,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:34:02,857 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HMobStore(268): FLUSH Renaming flushed file 
from hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411266969b1fa784148cfae5124d072d059e3_e4978131eaad54a00cbbf3f245fd971c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411266969b1fa784148cfae5124d072d059e3_e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:34:02,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/60ebdcaffaaa4c449a5180f3991dfb42, store: [table=TestAcidGuarantees family=A region=e4978131eaad54a00cbbf3f245fd971c] 2024-11-26T10:34:02,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/60ebdcaffaaa4c449a5180f3991dfb42 is 175, key is test_row_0/A:col10/1732617241134/Put/seqid=0 2024-11-26T10:34:02,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742026_1202 (size=31105) 2024-11-26T10:34:03,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-26T10:34:03,259 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. as already flushing 2024-11-26T10:34:03,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:34:03,262 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=214, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/60ebdcaffaaa4c449a5180f3991dfb42 2024-11-26T10:34:03,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/8488a1ab74994ccf87657112e891b42d is 50, key is test_row_0/B:col10/1732617241134/Put/seqid=0 2024-11-26T10:34:03,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742027_1203 (size=12151) 2024-11-26T10:34:03,282 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:03,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617303280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:03,283 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:03,283 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:03,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617303281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:03,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617303280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:03,283 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:03,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617303281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:03,384 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:03,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617303384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:03,385 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:03,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617303384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:03,385 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:03,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617303384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:03,386 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:03,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617303384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:03,586 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:03,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617303586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:03,588 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:03,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617303587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:03,588 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:03,588 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:03,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617303587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:03,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617303587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:03,678 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=214 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/8488a1ab74994ccf87657112e891b42d 2024-11-26T10:34:03,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/3ec5a8216b274a5fbbcfeb4c942057f5 is 50, key is test_row_0/C:col10/1732617241134/Put/seqid=0 2024-11-26T10:34:03,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742028_1204 (size=12151) 2024-11-26T10:34:03,891 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:03,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617303889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:03,893 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:03,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617303890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:03,893 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:03,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617303890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:03,894 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:03,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617303891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:04,097 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=214 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/3ec5a8216b274a5fbbcfeb4c942057f5 2024-11-26T10:34:04,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/60ebdcaffaaa4c449a5180f3991dfb42 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/60ebdcaffaaa4c449a5180f3991dfb42 2024-11-26T10:34:04,106 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/60ebdcaffaaa4c449a5180f3991dfb42, entries=150, sequenceid=214, filesize=30.4 K 2024-11-26T10:34:04,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/8488a1ab74994ccf87657112e891b42d as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/8488a1ab74994ccf87657112e891b42d 2024-11-26T10:34:04,111 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/8488a1ab74994ccf87657112e891b42d, entries=150, sequenceid=214, filesize=11.9 K 2024-11-26T10:34:04,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/3ec5a8216b274a5fbbcfeb4c942057f5 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/3ec5a8216b274a5fbbcfeb4c942057f5 2024-11-26T10:34:04,116 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/3ec5a8216b274a5fbbcfeb4c942057f5, entries=150, sequenceid=214, filesize=11.9 K 2024-11-26T10:34:04,117 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for e4978131eaad54a00cbbf3f245fd971c in 1681ms, sequenceid=214, compaction requested=true 2024-11-26T10:34:04,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2538): Flush status journal for e4978131eaad54a00cbbf3f245fd971c: 2024-11-26T10:34:04,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:04,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=58 2024-11-26T10:34:04,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4106): Remote procedure done, pid=58 2024-11-26T10:34:04,119 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=58, resume processing ppid=57 2024-11-26T10:34:04,119 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.0550 sec 2024-11-26T10:34:04,120 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees in 3.0580 sec 2024-11-26T10:34:04,344 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-26T10:34:04,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:34:04,394 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e4978131eaad54a00cbbf3f245fd971c 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-26T10:34:04,394 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e4978131eaad54a00cbbf3f245fd971c, store=A 2024-11-26T10:34:04,394 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:04,394 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e4978131eaad54a00cbbf3f245fd971c, store=B 2024-11-26T10:34:04,394 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:04,394 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e4978131eaad54a00cbbf3f245fd971c, store=C 2024-11-26T10:34:04,394 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:04,400 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411266954975f796340d8875276ae0aa662cd_e4978131eaad54a00cbbf3f245fd971c is 50, key is test_row_0/A:col10/1732617243268/Put/seqid=0 2024-11-26T10:34:04,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742029_1205 (size=14794) 2024-11-26T10:34:04,408 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:04,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617304404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:04,409 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:04,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617304405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:04,410 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:04,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617304408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:04,411 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:04,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617304409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:04,510 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:04,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617304509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:04,510 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:04,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617304509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:04,513 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:04,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617304511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:04,513 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:04,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617304511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:04,713 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:04,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617304712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:04,713 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:04,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617304712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:04,716 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:04,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617304714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:04,716 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:04,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617304715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:04,817 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:34:04,821 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411266954975f796340d8875276ae0aa662cd_e4978131eaad54a00cbbf3f245fd971c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411266954975f796340d8875276ae0aa662cd_e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:34:04,821 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/9dacb4355618493fa8a3842868ca1b2e, store: [table=TestAcidGuarantees family=A region=e4978131eaad54a00cbbf3f245fd971c] 2024-11-26T10:34:04,822 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/9dacb4355618493fa8a3842868ca1b2e is 175, key is test_row_0/A:col10/1732617243268/Put/seqid=0 2024-11-26T10:34:04,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742030_1206 (size=39749) 2024-11-26T10:34:05,018 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:05,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617305015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:05,018 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:05,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617305015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:05,020 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:05,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617305018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:05,020 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:05,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617305018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:05,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-26T10:34:05,166 INFO [Thread-728 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 57 completed 2024-11-26T10:34:05,167 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-26T10:34:05,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees 2024-11-26T10:34:05,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-26T10:34:05,169 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-26T10:34:05,169 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-26T10:34:05,169 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-26T10:34:05,226 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=236, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/9dacb4355618493fa8a3842868ca1b2e 2024-11-26T10:34:05,233 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/a71bae118bce4504965482ce0c506782 is 50, key is test_row_0/B:col10/1732617243268/Put/seqid=0 2024-11-26T10:34:05,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742031_1207 (size=12151) 
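The master-side records just above show the test client asking for another table flush: HMaster logs "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" and stores FlushTableProcedure pid=59, while the earlier flush (procId 57) is reported complete through HBaseAdmin$TableFuture. A minimal sketch of how such a flush is requested through the public client API, assuming the cluster's hbase-site.xml is on the classpath (the class name is illustrative, not part of the test):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
      public static void main(String[] args) throws IOException {
        // Picks up hbase-site.xml / hbase-default.xml from the classpath.
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Asks the master to flush every region of the table; on the master this
          // shows up as a FlushTableProcedure with one FlushRegionProcedure child
          // per region, as in the pid=57/58 and pid=59/60 entries in this log.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }

In this build the flush is procedure-backed and the client waits on the procedure result, which is what the HBaseAdmin$TableFuture "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 57 completed" line above reflects.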
2024-11-26T10:34:05,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-26T10:34:05,320 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:05,320 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-26T10:34:05,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:05,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. as already flushing 2024-11-26T10:34:05,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:05,321 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:05,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:05,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:05,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-26T10:34:05,473 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:05,473 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-26T10:34:05,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 
2024-11-26T10:34:05,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. as already flushing 2024-11-26T10:34:05,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:05,473 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:05,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:34:05,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:05,521 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:05,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617305521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:05,521 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:05,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617305521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:05,525 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:05,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617305523, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:05,527 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:05,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617305525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:05,625 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:05,626 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-26T10:34:05,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:05,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. as already flushing 2024-11-26T10:34:05,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:05,626 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
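The recurring RegionTooBusyException warnings in this stretch are HRegion.checkResources rejecting Mutate RPCs while the region's memstore sits above its blocking size; the writers back off and retry until the in-flight flush drains it. The blocking size is not a separate setting: it is derived from the memstore flush size and a blocking multiplier, and the 512.0 K limit in these messages suggests the test runs with a deliberately small flush size. A rough sketch of that derivation using the two standard configuration keys (the defaults shown are the stock ones, not this test's values):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimit {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Per-region memstore flush threshold (stock default 128 MB) ...
        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 134217728L);
        // ... and the blocking multiplier (stock default 4).
        long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);
        // Once a region's memstore exceeds flushSize * multiplier, puts are rejected with
        // RegionTooBusyException ("Over memstore limit=...") until a flush drains it.
        System.out.println("blocking memstore size = " + (flushSize * multiplier) + " bytes");
      }
    }

The HBase client treats RegionTooBusyException as retryable, so the test writers simply keep retrying and succeed once the flush at sequenceid=236 lands.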
2024-11-26T10:34:05,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:05,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:05,637 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/a71bae118bce4504965482ce0c506782 2024-11-26T10:34:05,643 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/044e09c8ea8d4a3298990cd75dcb7f30 is 50, key is test_row_0/C:col10/1732617243268/Put/seqid=0 2024-11-26T10:34:05,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742032_1208 (size=12151) 2024-11-26T10:34:05,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-26T10:34:05,778 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:05,778 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-26T10:34:05,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:05,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. as already flushing 2024-11-26T10:34:05,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:05,778 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:05,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:05,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:05,930 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:05,930 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-26T10:34:05,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:05,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. as already flushing 2024-11-26T10:34:05,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:05,931 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:05,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:05,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:34:06,051 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/044e09c8ea8d4a3298990cd75dcb7f30 2024-11-26T10:34:06,054 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/9dacb4355618493fa8a3842868ca1b2e as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/9dacb4355618493fa8a3842868ca1b2e 2024-11-26T10:34:06,058 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/9dacb4355618493fa8a3842868ca1b2e, entries=200, sequenceid=236, filesize=38.8 K 2024-11-26T10:34:06,059 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/a71bae118bce4504965482ce0c506782 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/a71bae118bce4504965482ce0c506782 2024-11-26T10:34:06,063 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/a71bae118bce4504965482ce0c506782, entries=150, sequenceid=236, filesize=11.9 K 2024-11-26T10:34:06,064 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/044e09c8ea8d4a3298990cd75dcb7f30 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/044e09c8ea8d4a3298990cd75dcb7f30 2024-11-26T10:34:06,068 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/044e09c8ea8d4a3298990cd75dcb7f30, entries=150, sequenceid=236, filesize=11.9 K 2024-11-26T10:34:06,069 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for e4978131eaad54a00cbbf3f245fd971c in 1675ms, sequenceid=236, compaction requested=true 2024-11-26T10:34:06,069 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e4978131eaad54a00cbbf3f245fd971c: 2024-11-26T10:34:06,069 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e4978131eaad54a00cbbf3f245fd971c:A, priority=-2147483648, current under compaction store size is 1 2024-11-26T10:34:06,069 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:34:06,069 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-26T10:34:06,069 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e4978131eaad54a00cbbf3f245fd971c:B, priority=-2147483648, current under compaction store size is 2 2024-11-26T10:34:06,069 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:34:06,069 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e4978131eaad54a00cbbf3f245fd971c:C, priority=-2147483648, current under compaction store size is 3 2024-11-26T10:34:06,069 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-26T10:34:06,069 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-26T10:34:06,070 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48980 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-26T10:34:06,070 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 150724 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-26T10:34:06,070 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): e4978131eaad54a00cbbf3f245fd971c/A is initiating minor compaction (all files) 2024-11-26T10:34:06,071 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): e4978131eaad54a00cbbf3f245fd971c/B is initiating minor compaction (all files) 2024-11-26T10:34:06,071 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e4978131eaad54a00cbbf3f245fd971c/A in TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:06,071 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e4978131eaad54a00cbbf3f245fd971c/B in TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 
2024-11-26T10:34:06,071 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/930d14809acf4053b8a110f2292861a4, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/91d5f5bfcf3f4cf484c3a589cb3a04b9, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/8488a1ab74994ccf87657112e891b42d, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/a71bae118bce4504965482ce0c506782] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp, totalSize=47.8 K 2024-11-26T10:34:06,071 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/9251504322d348e48b5a18c2eb8cf628, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/71deba5982734d81b7c6d2a5d6c2c097, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/60ebdcaffaaa4c449a5180f3991dfb42, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/9dacb4355618493fa8a3842868ca1b2e] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp, totalSize=147.2 K 2024-11-26T10:34:06,071 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:06,071 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 
files: [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/9251504322d348e48b5a18c2eb8cf628, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/71deba5982734d81b7c6d2a5d6c2c097, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/60ebdcaffaaa4c449a5180f3991dfb42, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/9dacb4355618493fa8a3842868ca1b2e] 2024-11-26T10:34:06,071 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 930d14809acf4053b8a110f2292861a4, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732617238857 2024-11-26T10:34:06,071 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9251504322d348e48b5a18c2eb8cf628, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732617238857 2024-11-26T10:34:06,071 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 91d5f5bfcf3f4cf484c3a589cb3a04b9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1732617239998 2024-11-26T10:34:06,071 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 71deba5982734d81b7c6d2a5d6c2c097, keycount=250, bloomtype=ROW, size=47.3 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1732617239998 2024-11-26T10:34:06,072 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 8488a1ab74994ccf87657112e891b42d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732617241128 2024-11-26T10:34:06,072 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 60ebdcaffaaa4c449a5180f3991dfb42, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732617241128 2024-11-26T10:34:06,072 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9dacb4355618493fa8a3842868ca1b2e, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732617243268 2024-11-26T10:34:06,072 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting a71bae118bce4504965482ce0c506782, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732617243268 2024-11-26T10:34:06,080 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e4978131eaad54a00cbbf3f245fd971c#B#compaction#176 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:34:06,081 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/a3311c0642e54255b532c70922104aec is 50, key is test_row_0/B:col10/1732617243268/Put/seqid=0 2024-11-26T10:34:06,082 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=e4978131eaad54a00cbbf3f245fd971c] 2024-11-26T10:34:06,082 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:06,083 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-26T10:34:06,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:06,083 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2837): Flushing e4978131eaad54a00cbbf3f245fd971c 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-26T10:34:06,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e4978131eaad54a00cbbf3f245fd971c, store=A 2024-11-26T10:34:06,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:06,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e4978131eaad54a00cbbf3f245fd971c, store=B 2024-11-26T10:34:06,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:06,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e4978131eaad54a00cbbf3f245fd971c, store=C 2024-11-26T10:34:06,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:06,091 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241126b50ae7f9b22f4e1eae4866d1e995c875_e4978131eaad54a00cbbf3f245fd971c store=[table=TestAcidGuarantees family=A region=e4978131eaad54a00cbbf3f245fd971c] 2024-11-26T10:34:06,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742033_1209 (size=12663) 2024-11-26T10:34:06,094 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 
mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241126b50ae7f9b22f4e1eae4866d1e995c875_e4978131eaad54a00cbbf3f245fd971c, store=[table=TestAcidGuarantees family=A region=e4978131eaad54a00cbbf3f245fd971c] 2024-11-26T10:34:06,094 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126b50ae7f9b22f4e1eae4866d1e995c875_e4978131eaad54a00cbbf3f245fd971c because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=e4978131eaad54a00cbbf3f245fd971c] 2024-11-26T10:34:06,100 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/a3311c0642e54255b532c70922104aec as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/a3311c0642e54255b532c70922104aec 2024-11-26T10:34:06,108 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e4978131eaad54a00cbbf3f245fd971c/B of e4978131eaad54a00cbbf3f245fd971c into a3311c0642e54255b532c70922104aec(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:34:06,108 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e4978131eaad54a00cbbf3f245fd971c: 2024-11-26T10:34:06,108 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c., storeName=e4978131eaad54a00cbbf3f245fd971c/B, priority=12, startTime=1732617246069; duration=0sec 2024-11-26T10:34:06,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112630e46ca48f0f4367b695fa67cda4268d_e4978131eaad54a00cbbf3f245fd971c is 50, key is test_row_0/A:col10/1732617244408/Put/seqid=0 2024-11-26T10:34:06,108 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:34:06,109 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e4978131eaad54a00cbbf3f245fd971c:B 2024-11-26T10:34:06,109 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-26T10:34:06,110 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48980 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-26T10:34:06,110 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): e4978131eaad54a00cbbf3f245fd971c/C is initiating minor compaction (all files) 2024-11-26T10:34:06,110 INFO 
[RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e4978131eaad54a00cbbf3f245fd971c/C in TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:06,111 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/2622a5ce74aa45318e235372ae636169, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/38731f42dff248e28c1b4100d66e4c54, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/3ec5a8216b274a5fbbcfeb4c942057f5, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/044e09c8ea8d4a3298990cd75dcb7f30] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp, totalSize=47.8 K 2024-11-26T10:34:06,111 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 2622a5ce74aa45318e235372ae636169, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732617238857 2024-11-26T10:34:06,111 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 38731f42dff248e28c1b4100d66e4c54, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1732617239998 2024-11-26T10:34:06,112 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 3ec5a8216b274a5fbbcfeb4c942057f5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732617241128 2024-11-26T10:34:06,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742034_1210 (size=4469) 2024-11-26T10:34:06,112 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 044e09c8ea8d4a3298990cd75dcb7f30, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732617243268 2024-11-26T10:34:06,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742035_1211 (size=12304) 2024-11-26T10:34:06,126 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e4978131eaad54a00cbbf3f245fd971c#C#compaction#179 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:34:06,126 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/050b5da4d1c34a84a81bef08c539f888 is 50, key is test_row_0/C:col10/1732617243268/Put/seqid=0 2024-11-26T10:34:06,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:34:06,131 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112630e46ca48f0f4367b695fa67cda4268d_e4978131eaad54a00cbbf3f245fd971c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112630e46ca48f0f4367b695fa67cda4268d_e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:34:06,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/a7dca4d8044e4737ad980f9c3c65dd39, store: [table=TestAcidGuarantees family=A region=e4978131eaad54a00cbbf3f245fd971c] 2024-11-26T10:34:06,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/a7dca4d8044e4737ad980f9c3c65dd39 is 175, key is test_row_0/A:col10/1732617244408/Put/seqid=0 2024-11-26T10:34:06,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742036_1212 (size=12663) 2024-11-26T10:34:06,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742037_1213 (size=31105) 2024-11-26T10:34:06,148 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/050b5da4d1c34a84a81bef08c539f888 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/050b5da4d1c34a84a81bef08c539f888 2024-11-26T10:34:06,154 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e4978131eaad54a00cbbf3f245fd971c/C of e4978131eaad54a00cbbf3f245fd971c into 050b5da4d1c34a84a81bef08c539f888(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-26T10:34:06,154 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e4978131eaad54a00cbbf3f245fd971c: 2024-11-26T10:34:06,154 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c., storeName=e4978131eaad54a00cbbf3f245fd971c/C, priority=12, startTime=1732617246069; duration=0sec 2024-11-26T10:34:06,154 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:34:06,154 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e4978131eaad54a00cbbf3f245fd971c:C 2024-11-26T10:34:06,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-26T10:34:06,513 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e4978131eaad54a00cbbf3f245fd971c#A#compaction#177 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:34:06,514 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/ee7c356d4fa24d0482f39107b482ffd0 is 175, key is test_row_0/A:col10/1732617243268/Put/seqid=0 2024-11-26T10:34:06,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742038_1214 (size=31617) 2024-11-26T10:34:06,525 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. as already flushing 2024-11-26T10:34:06,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:34:06,543 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=251, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/a7dca4d8044e4737ad980f9c3c65dd39 2024-11-26T10:34:06,545 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:06,545 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:06,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617306541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:06,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617306541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:06,548 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:06,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617306545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:06,548 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:06,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617306545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:06,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/070f1a4812564ed5a09e5e2a25cda98c is 50, key is test_row_0/B:col10/1732617244408/Put/seqid=0 2024-11-26T10:34:06,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742039_1215 (size=12151) 2024-11-26T10:34:06,648 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:06,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617306646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:06,648 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:06,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617306646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:06,650 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:06,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617306649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:06,651 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:06,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617306649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:06,850 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:06,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617306849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:06,851 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:06,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617306850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:06,854 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:06,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617306851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:06,854 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:06,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617306852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:06,923 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/ee7c356d4fa24d0482f39107b482ffd0 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/ee7c356d4fa24d0482f39107b482ffd0 2024-11-26T10:34:06,928 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e4978131eaad54a00cbbf3f245fd971c/A of e4978131eaad54a00cbbf3f245fd971c into ee7c356d4fa24d0482f39107b482ffd0(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-26T10:34:06,928 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e4978131eaad54a00cbbf3f245fd971c: 2024-11-26T10:34:06,928 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c., storeName=e4978131eaad54a00cbbf3f245fd971c/A, priority=12, startTime=1732617246069; duration=0sec 2024-11-26T10:34:06,928 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:34:06,928 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e4978131eaad54a00cbbf3f245fd971c:A 2024-11-26T10:34:06,954 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/070f1a4812564ed5a09e5e2a25cda98c 2024-11-26T10:34:06,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/ca7ab498beb94abc973a17165bc9fc4e is 50, key is test_row_0/C:col10/1732617244408/Put/seqid=0 2024-11-26T10:34:06,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742040_1216 (size=12151) 2024-11-26T10:34:07,152 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:07,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617307151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:07,154 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:07,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617307153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:07,156 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:07,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617307155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:07,157 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:07,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617307157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:07,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-26T10:34:07,365 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/ca7ab498beb94abc973a17165bc9fc4e 2024-11-26T10:34:07,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/a7dca4d8044e4737ad980f9c3c65dd39 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/a7dca4d8044e4737ad980f9c3c65dd39 2024-11-26T10:34:07,373 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/a7dca4d8044e4737ad980f9c3c65dd39, entries=150, sequenceid=251, filesize=30.4 K 2024-11-26T10:34:07,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/070f1a4812564ed5a09e5e2a25cda98c as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/070f1a4812564ed5a09e5e2a25cda98c 2024-11-26T10:34:07,378 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/070f1a4812564ed5a09e5e2a25cda98c, entries=150, sequenceid=251, filesize=11.9 K 2024-11-26T10:34:07,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/ca7ab498beb94abc973a17165bc9fc4e as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/ca7ab498beb94abc973a17165bc9fc4e 2024-11-26T10:34:07,383 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/ca7ab498beb94abc973a17165bc9fc4e, entries=150, sequenceid=251, filesize=11.9 K 2024-11-26T10:34:07,383 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for e4978131eaad54a00cbbf3f245fd971c in 1300ms, sequenceid=251, compaction requested=false 2024-11-26T10:34:07,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2538): Flush status journal for e4978131eaad54a00cbbf3f245fd971c: 2024-11-26T10:34:07,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:07,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=60 2024-11-26T10:34:07,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4106): Remote procedure done, pid=60 2024-11-26T10:34:07,385 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=60, resume processing ppid=59 2024-11-26T10:34:07,385 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, ppid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2150 sec 2024-11-26T10:34:07,387 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees in 2.2190 sec 2024-11-26T10:34:07,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:34:07,657 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e4978131eaad54a00cbbf3f245fd971c 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-26T10:34:07,657 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e4978131eaad54a00cbbf3f245fd971c, store=A 2024-11-26T10:34:07,657 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:07,657 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e4978131eaad54a00cbbf3f245fd971c, store=B 2024-11-26T10:34:07,657 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:07,657 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK e4978131eaad54a00cbbf3f245fd971c, store=C 2024-11-26T10:34:07,657 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:07,664 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112636a3f4b381a74d64a3a99929fd45ec57_e4978131eaad54a00cbbf3f245fd971c is 50, key is test_row_0/A:col10/1732617247655/Put/seqid=0 2024-11-26T10:34:07,666 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:07,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617307664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:07,666 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:07,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617307664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:07,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742041_1217 (size=14994) 2024-11-26T10:34:07,672 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:07,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617307667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:07,673 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:07,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617307668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:07,768 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:07,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617307767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:07,768 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:07,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617307767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:07,775 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:07,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617307773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:07,775 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:07,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617307774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:07,970 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:07,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617307969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:07,971 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:07,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617307970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:07,976 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:07,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617307976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:07,977 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:07,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617307976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:08,070 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:34:08,073 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112636a3f4b381a74d64a3a99929fd45ec57_e4978131eaad54a00cbbf3f245fd971c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112636a3f4b381a74d64a3a99929fd45ec57_e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:34:08,074 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/7046355acca04d9491d8bdd0f5bbbb60, store: [table=TestAcidGuarantees family=A region=e4978131eaad54a00cbbf3f245fd971c] 2024-11-26T10:34:08,074 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/7046355acca04d9491d8bdd0f5bbbb60 is 175, key is test_row_0/A:col10/1732617247655/Put/seqid=0 2024-11-26T10:34:08,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742042_1218 (size=39949) 2024-11-26T10:34:08,276 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:08,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617308274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:08,276 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:08,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617308274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:08,278 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:08,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617308277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:08,278 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:08,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617308278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:08,479 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=276, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/7046355acca04d9491d8bdd0f5bbbb60 2024-11-26T10:34:08,485 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/ae4b3554942c4cf686420dc159bdbd42 is 50, key is test_row_0/B:col10/1732617247655/Put/seqid=0 2024-11-26T10:34:08,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742043_1219 (size=12301) 2024-11-26T10:34:08,780 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:08,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617308780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:08,781 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:08,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617308781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:08,781 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:08,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617308781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:08,785 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:08,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617308783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:08,893 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/ae4b3554942c4cf686420dc159bdbd42 2024-11-26T10:34:08,900 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/57aab9c06d094d438c06a0a84a56c8a2 is 50, key is test_row_0/C:col10/1732617247655/Put/seqid=0 2024-11-26T10:34:08,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742044_1220 (size=12301) 2024-11-26T10:34:09,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-26T10:34:09,273 INFO [Thread-728 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 59 completed 2024-11-26T10:34:09,274 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-26T10:34:09,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=61, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees 2024-11-26T10:34:09,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-26T10:34:09,275 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=61, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-26T10:34:09,275 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=61, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-26T10:34:09,275 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=62, ppid=61, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
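[Editor's aside, not part of the captured log] The repeated RegionTooBusyException entries above ("Over memstore limit=512.0 K") are a retryable signal that the region's memstore has reached its blocking size while a flush is in progress. The standard HBase client already retries this exception internally; the following minimal Java sketch only makes that control flow explicit. It is illustrative: the table, row, column family and backoff values are taken loosely from the log, and the connection settings are placeholders.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutWithRetry {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;
          for (int attempt = 0; attempt < 5; attempt++) {
            try {
              table.put(put);
              break;                     // write accepted
            } catch (RegionTooBusyException e) {
              Thread.sleep(backoffMs);   // region blocked on memstore size; back off and retry
              backoffMs *= 2;
            }
          }
        }
      }
    }
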
2024-11-26T10:34:09,305 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/57aab9c06d094d438c06a0a84a56c8a2 2024-11-26T10:34:09,310 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/7046355acca04d9491d8bdd0f5bbbb60 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/7046355acca04d9491d8bdd0f5bbbb60 2024-11-26T10:34:09,314 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/7046355acca04d9491d8bdd0f5bbbb60, entries=200, sequenceid=276, filesize=39.0 K 2024-11-26T10:34:09,315 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/ae4b3554942c4cf686420dc159bdbd42 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/ae4b3554942c4cf686420dc159bdbd42 2024-11-26T10:34:09,319 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/ae4b3554942c4cf686420dc159bdbd42, entries=150, sequenceid=276, filesize=12.0 K 2024-11-26T10:34:09,320 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/57aab9c06d094d438c06a0a84a56c8a2 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/57aab9c06d094d438c06a0a84a56c8a2 2024-11-26T10:34:09,325 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/57aab9c06d094d438c06a0a84a56c8a2, entries=150, sequenceid=276, filesize=12.0 K 2024-11-26T10:34:09,326 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for e4978131eaad54a00cbbf3f245fd971c in 1670ms, sequenceid=276, compaction requested=true 2024-11-26T10:34:09,326 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e4978131eaad54a00cbbf3f245fd971c: 2024-11-26T10:34:09,326 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e4978131eaad54a00cbbf3f245fd971c:A, priority=-2147483648, current under compaction store size is 1 2024-11-26T10:34:09,326 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:34:09,326 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:34:09,326 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e4978131eaad54a00cbbf3f245fd971c:B, priority=-2147483648, current under compaction store size is 2 2024-11-26T10:34:09,326 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:34:09,326 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:34:09,326 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e4978131eaad54a00cbbf3f245fd971c:C, priority=-2147483648, current under compaction store size is 3 2024-11-26T10:34:09,326 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:34:09,327 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37115 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:34:09,327 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102671 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:34:09,327 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): e4978131eaad54a00cbbf3f245fd971c/B is initiating minor compaction (all files) 2024-11-26T10:34:09,327 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): e4978131eaad54a00cbbf3f245fd971c/A is initiating minor compaction (all files) 2024-11-26T10:34:09,327 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e4978131eaad54a00cbbf3f245fd971c/B in TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:09,327 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e4978131eaad54a00cbbf3f245fd971c/A in TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 
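[Editor's aside, not part of the captured log] The selection logged above ("3 store files, 0 compacting, 3 eligible, 16 blocking", ExploringCompactionPolicy) is governed by the standard store-file thresholds. The sketch below shows the relevant configuration keys set programmatically; the values are the usual HBase defaults, shown for illustration, and are not read from this test's configuration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuning {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compactionThreshold", 3);  // minimum store files before a minor compaction is considered
        conf.setInt("hbase.hstore.compaction.max", 10);      // maximum files merged in one minor compaction
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);  // store-file count at which writes are blocked (the "16 blocking" above)
        System.out.println("blocking store files = " + conf.getInt("hbase.hstore.blockingStoreFiles", -1));
      }
    }
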
2024-11-26T10:34:09,328 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/ee7c356d4fa24d0482f39107b482ffd0, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/a7dca4d8044e4737ad980f9c3c65dd39, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/7046355acca04d9491d8bdd0f5bbbb60] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp, totalSize=100.3 K 2024-11-26T10:34:09,328 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/a3311c0642e54255b532c70922104aec, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/070f1a4812564ed5a09e5e2a25cda98c, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/ae4b3554942c4cf686420dc159bdbd42] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp, totalSize=36.2 K 2024-11-26T10:34:09,328 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:09,328 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 
files: [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/ee7c356d4fa24d0482f39107b482ffd0, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/a7dca4d8044e4737ad980f9c3c65dd39, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/7046355acca04d9491d8bdd0f5bbbb60] 2024-11-26T10:34:09,328 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting a3311c0642e54255b532c70922104aec, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732617243268 2024-11-26T10:34:09,328 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting ee7c356d4fa24d0482f39107b482ffd0, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732617243268 2024-11-26T10:34:09,328 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 070f1a4812564ed5a09e5e2a25cda98c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1732617244403 2024-11-26T10:34:09,329 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting a7dca4d8044e4737ad980f9c3c65dd39, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1732617244403 2024-11-26T10:34:09,329 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting ae4b3554942c4cf686420dc159bdbd42, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732617246544 2024-11-26T10:34:09,329 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7046355acca04d9491d8bdd0f5bbbb60, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732617246540 2024-11-26T10:34:09,336 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e4978131eaad54a00cbbf3f245fd971c#B#compaction#185 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:34:09,336 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/16fb7206c53441e1b8a57d173ff58364 is 50, key is test_row_0/B:col10/1732617247655/Put/seqid=0 2024-11-26T10:34:09,337 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=e4978131eaad54a00cbbf3f245fd971c] 2024-11-26T10:34:09,339 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241126894be84ec02541a88d9cfaa79d67e618_e4978131eaad54a00cbbf3f245fd971c store=[table=TestAcidGuarantees family=A region=e4978131eaad54a00cbbf3f245fd971c] 2024-11-26T10:34:09,341 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241126894be84ec02541a88d9cfaa79d67e618_e4978131eaad54a00cbbf3f245fd971c, store=[table=TestAcidGuarantees family=A region=e4978131eaad54a00cbbf3f245fd971c] 2024-11-26T10:34:09,341 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126894be84ec02541a88d9cfaa79d67e618_e4978131eaad54a00cbbf3f245fd971c because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=e4978131eaad54a00cbbf3f245fd971c] 2024-11-26T10:34:09,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742045_1221 (size=12915) 2024-11-26T10:34:09,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742046_1222 (size=4469) 2024-11-26T10:34:09,352 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e4978131eaad54a00cbbf3f245fd971c#A#compaction#186 average throughput is 1.63 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:34:09,353 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/81a65d700e8b481baf099f00a56b88d7 is 175, key is test_row_0/A:col10/1732617247655/Put/seqid=0 2024-11-26T10:34:09,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742047_1223 (size=31869) 2024-11-26T10:34:09,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-26T10:34:09,426 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:09,427 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-26T10:34:09,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:09,427 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2837): Flushing e4978131eaad54a00cbbf3f245fd971c 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-26T10:34:09,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e4978131eaad54a00cbbf3f245fd971c, store=A 2024-11-26T10:34:09,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:09,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e4978131eaad54a00cbbf3f245fd971c, store=B 2024-11-26T10:34:09,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:09,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e4978131eaad54a00cbbf3f245fd971c, store=C 2024-11-26T10:34:09,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:09,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411261434c621d0a84ce48b57c5d188746133_e4978131eaad54a00cbbf3f245fd971c is 50, key is test_row_0/A:col10/1732617247664/Put/seqid=0 2024-11-26T10:34:09,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742048_1224 
(size=12454) 2024-11-26T10:34:09,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:34:09,442 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411261434c621d0a84ce48b57c5d188746133_e4978131eaad54a00cbbf3f245fd971c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411261434c621d0a84ce48b57c5d188746133_e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:34:09,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/faa41aad20d047c9a47f669c52e9a244, store: [table=TestAcidGuarantees family=A region=e4978131eaad54a00cbbf3f245fd971c] 2024-11-26T10:34:09,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/faa41aad20d047c9a47f669c52e9a244 is 175, key is test_row_0/A:col10/1732617247664/Put/seqid=0 2024-11-26T10:34:09,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742049_1225 (size=31255) 2024-11-26T10:34:09,448 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=291, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/faa41aad20d047c9a47f669c52e9a244 2024-11-26T10:34:09,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/6410d18355314764bfb87c1b7ee5739e is 50, key is test_row_0/B:col10/1732617247664/Put/seqid=0 2024-11-26T10:34:09,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742050_1226 (size=12301) 2024-11-26T10:34:09,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-26T10:34:09,749 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/16fb7206c53441e1b8a57d173ff58364 as 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/16fb7206c53441e1b8a57d173ff58364 2024-11-26T10:34:09,753 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e4978131eaad54a00cbbf3f245fd971c/B of e4978131eaad54a00cbbf3f245fd971c into 16fb7206c53441e1b8a57d173ff58364(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:34:09,753 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e4978131eaad54a00cbbf3f245fd971c: 2024-11-26T10:34:09,753 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c., storeName=e4978131eaad54a00cbbf3f245fd971c/B, priority=13, startTime=1732617249326; duration=0sec 2024-11-26T10:34:09,753 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:34:09,753 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e4978131eaad54a00cbbf3f245fd971c:B 2024-11-26T10:34:09,753 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:34:09,754 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37115 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:34:09,754 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): e4978131eaad54a00cbbf3f245fd971c/C is initiating minor compaction (all files) 2024-11-26T10:34:09,754 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e4978131eaad54a00cbbf3f245fd971c/C in TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 
2024-11-26T10:34:09,755 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/050b5da4d1c34a84a81bef08c539f888, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/ca7ab498beb94abc973a17165bc9fc4e, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/57aab9c06d094d438c06a0a84a56c8a2] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp, totalSize=36.2 K 2024-11-26T10:34:09,755 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 050b5da4d1c34a84a81bef08c539f888, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732617243268 2024-11-26T10:34:09,755 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting ca7ab498beb94abc973a17165bc9fc4e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1732617244403 2024-11-26T10:34:09,755 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 57aab9c06d094d438c06a0a84a56c8a2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732617246544 2024-11-26T10:34:09,762 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e4978131eaad54a00cbbf3f245fd971c#C#compaction#189 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:34:09,763 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/fe3d62a7054d4696a9deafd739f3d84d is 50, key is test_row_0/C:col10/1732617247655/Put/seqid=0 2024-11-26T10:34:09,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742051_1227 (size=12915) 2024-11-26T10:34:09,772 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/81a65d700e8b481baf099f00a56b88d7 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/81a65d700e8b481baf099f00a56b88d7 2024-11-26T10:34:09,776 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/fe3d62a7054d4696a9deafd739f3d84d as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/fe3d62a7054d4696a9deafd739f3d84d 2024-11-26T10:34:09,778 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e4978131eaad54a00cbbf3f245fd971c/A of e4978131eaad54a00cbbf3f245fd971c into 81a65d700e8b481baf099f00a56b88d7(size=31.1 K), total size for store is 31.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:34:09,778 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e4978131eaad54a00cbbf3f245fd971c: 2024-11-26T10:34:09,778 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c., storeName=e4978131eaad54a00cbbf3f245fd971c/A, priority=13, startTime=1732617249326; duration=0sec 2024-11-26T10:34:09,778 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:34:09,778 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e4978131eaad54a00cbbf3f245fd971c:A 2024-11-26T10:34:09,783 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e4978131eaad54a00cbbf3f245fd971c/C of e4978131eaad54a00cbbf3f245fd971c into fe3d62a7054d4696a9deafd739f3d84d(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
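[Editor's aside, not part of the captured log] The FlushRegionProcedure executing above (pid=62) was spawned by a client-requested table flush (pid=61, "Client=jenkins//172.17.0.2 flush TestAcidGuarantees"). A minimal sketch of issuing such a request through the public Admin API follows; connection settings are placeholders and the table name is taken from the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Asks the master to flush every region of the table, which shows up in the
          // master log as a FlushTableProcedure with per-region FlushRegionProcedure children.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }
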
2024-11-26T10:34:09,783 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e4978131eaad54a00cbbf3f245fd971c: 2024-11-26T10:34:09,783 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c., storeName=e4978131eaad54a00cbbf3f245fd971c/C, priority=13, startTime=1732617249326; duration=0sec 2024-11-26T10:34:09,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:34:09,783 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:34:09,783 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. as already flushing 2024-11-26T10:34:09,783 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e4978131eaad54a00cbbf3f245fd971c:C 2024-11-26T10:34:09,844 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:09,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617309843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:09,845 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:09,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617309843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:09,846 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:09,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617309844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:09,847 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:09,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617309844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:09,860 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/6410d18355314764bfb87c1b7ee5739e 2024-11-26T10:34:09,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/47662a4ea5994259a93d1c2223ced159 is 50, key is test_row_0/C:col10/1732617247664/Put/seqid=0 2024-11-26T10:34:09,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742052_1228 (size=12301) 2024-11-26T10:34:09,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-26T10:34:09,947 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:09,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617309945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:09,947 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:09,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617309945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:09,948 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:09,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617309947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:09,948 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:09,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617309948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:10,135 DEBUG [master/ccf62758a0a5:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region b7271e3c105b406e8a3f3f956110c7a1 changed from -1.0 to 0.0, refreshing cache 2024-11-26T10:34:10,149 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:10,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617310149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:10,149 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:10,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617310149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:10,150 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:10,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617310149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:10,150 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:10,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617310149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:10,273 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/47662a4ea5994259a93d1c2223ced159 2024-11-26T10:34:10,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/faa41aad20d047c9a47f669c52e9a244 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/faa41aad20d047c9a47f669c52e9a244 2024-11-26T10:34:10,281 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/faa41aad20d047c9a47f669c52e9a244, entries=150, sequenceid=291, filesize=30.5 K 2024-11-26T10:34:10,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/6410d18355314764bfb87c1b7ee5739e as 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/6410d18355314764bfb87c1b7ee5739e 2024-11-26T10:34:10,285 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/6410d18355314764bfb87c1b7ee5739e, entries=150, sequenceid=291, filesize=12.0 K 2024-11-26T10:34:10,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/47662a4ea5994259a93d1c2223ced159 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/47662a4ea5994259a93d1c2223ced159 2024-11-26T10:34:10,291 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/47662a4ea5994259a93d1c2223ced159, entries=150, sequenceid=291, filesize=12.0 K 2024-11-26T10:34:10,291 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for e4978131eaad54a00cbbf3f245fd971c in 864ms, sequenceid=291, compaction requested=false 2024-11-26T10:34:10,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2538): Flush status journal for e4978131eaad54a00cbbf3f245fd971c: 2024-11-26T10:34:10,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 
2024-11-26T10:34:10,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=62 2024-11-26T10:34:10,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4106): Remote procedure done, pid=62 2024-11-26T10:34:10,293 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=62, resume processing ppid=61 2024-11-26T10:34:10,293 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, ppid=61, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0170 sec 2024-11-26T10:34:10,294 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees in 1.0200 sec 2024-11-26T10:34:10,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-26T10:34:10,378 INFO [Thread-728 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 61 completed 2024-11-26T10:34:10,379 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-26T10:34:10,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=63, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees 2024-11-26T10:34:10,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-26T10:34:10,380 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=63, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-26T10:34:10,380 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=63, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-26T10:34:10,380 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-26T10:34:10,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:34:10,452 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e4978131eaad54a00cbbf3f245fd971c 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-26T10:34:10,452 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e4978131eaad54a00cbbf3f245fd971c, store=A 2024-11-26T10:34:10,453 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:10,453 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e4978131eaad54a00cbbf3f245fd971c, store=B 2024-11-26T10:34:10,453 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-11-26T10:34:10,453 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e4978131eaad54a00cbbf3f245fd971c, store=C 2024-11-26T10:34:10,453 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:10,460 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126dc0812887aca43adbf541d28189a94a0_e4978131eaad54a00cbbf3f245fd971c is 50, key is test_row_0/A:col10/1732617249839/Put/seqid=0 2024-11-26T10:34:10,460 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:10,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617310459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:10,461 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:10,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617310459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:10,462 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:10,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617310460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:10,463 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:10,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617310463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:10,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742053_1229 (size=14994) 2024-11-26T10:34:10,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-26T10:34:10,532 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:10,532 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-11-26T10:34:10,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:10,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. as already flushing 2024-11-26T10:34:10,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:10,532 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:10,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:10,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:10,564 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:10,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617310561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:10,565 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:10,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617310562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:10,565 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:10,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617310563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:10,565 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:10,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617310564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:10,627 DEBUG [Thread-731 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x152377d4 to 127.0.0.1:61934 2024-11-26T10:34:10,628 DEBUG [Thread-731 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:34:10,629 DEBUG [Thread-735 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x08ba8425 to 127.0.0.1:61934 2024-11-26T10:34:10,629 DEBUG [Thread-735 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:34:10,630 DEBUG [Thread-733 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1a52344f to 127.0.0.1:61934 2024-11-26T10:34:10,630 DEBUG [Thread-733 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:34:10,630 DEBUG [Thread-729 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x68f0be85 to 127.0.0.1:61934 2024-11-26T10:34:10,630 DEBUG [Thread-729 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:34:10,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-26T10:34:10,684 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:10,685 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-11-26T10:34:10,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:10,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. as already flushing 2024-11-26T10:34:10,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 
2024-11-26T10:34:10,685 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:10,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:10,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:10,767 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:10,767 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:10,767 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:10,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617310767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:10,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617310767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:10,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617310767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:10,768 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:10,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617310768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:10,837 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:10,839 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-11-26T10:34:10,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:10,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. as already flushing 2024-11-26T10:34:10,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:10,840 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:34:10,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:10,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:10,878 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:34:10,887 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126dc0812887aca43adbf541d28189a94a0_e4978131eaad54a00cbbf3f245fd971c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126dc0812887aca43adbf541d28189a94a0_e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:34:10,888 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/52339b9cfca64ac1b99ac8cbb193a2f7, store: [table=TestAcidGuarantees family=A region=e4978131eaad54a00cbbf3f245fd971c] 2024-11-26T10:34:10,888 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/52339b9cfca64ac1b99ac8cbb193a2f7 is 175, key is test_row_0/A:col10/1732617249839/Put/seqid=0 2024-11-26T10:34:10,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742054_1230 (size=39949) 2024-11-26T10:34:10,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-26T10:34:10,994 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:10,995 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-11-26T10:34:10,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:10,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. as already flushing 2024-11-26T10:34:10,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 
2024-11-26T10:34:10,996 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:10,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:10,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:11,071 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:11,071 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:11,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617311070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:11,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617311070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:11,072 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:11,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617311072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:11,073 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:11,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617311072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:11,148 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:11,149 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-11-26T10:34:11,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 
2024-11-26T10:34:11,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. as already flushing 2024-11-26T10:34:11,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:11,150 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:11,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:34:11,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:11,294 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=317, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/52339b9cfca64ac1b99ac8cbb193a2f7 2024-11-26T10:34:11,304 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:11,304 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-11-26T10:34:11,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 
2024-11-26T10:34:11,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. as already flushing 2024-11-26T10:34:11,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:11,305 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:11,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:34:11,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:34:11,307 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/44d766342e404fc2b0246ddfe987c2e5 is 50, key is test_row_0/B:col10/1732617249839/Put/seqid=0 2024-11-26T10:34:11,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742055_1231 (size=12301) 2024-11-26T10:34:11,457 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:11,458 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-11-26T10:34:11,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:11,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. as already flushing 2024-11-26T10:34:11,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:11,459 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:11,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:11,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:34:11,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-26T10:34:11,574 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:11,574 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:11,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55238 deadline: 1732617311574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:11,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55196 deadline: 1732617311574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:11,576 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:11,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55252 deadline: 1732617311576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:11,580 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:11,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55230 deadline: 1732617311579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:11,614 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:11,615 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-11-26T10:34:11,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:11,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. as already flushing 2024-11-26T10:34:11,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:11,615 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:11,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:11,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:11,636 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:11,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55276 deadline: 1732617311636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:11,637 DEBUG [Thread-726 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18188 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c., hostname=ccf62758a0a5,45419,1732617185877, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-26T10:34:11,711 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=317 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/44d766342e404fc2b0246ddfe987c2e5 2024-11-26T10:34:11,718 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/fea7dc851f4e41268934c8e99522082a is 
50, key is test_row_0/C:col10/1732617249839/Put/seqid=0 2024-11-26T10:34:11,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742056_1232 (size=12301) 2024-11-26T10:34:11,768 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:11,769 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-11-26T10:34:11,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:11,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. as already flushing 2024-11-26T10:34:11,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:11,770 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:11,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:11,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:11,924 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:11,925 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-11-26T10:34:11,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 
2024-11-26T10:34:11,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. as already flushing 2024-11-26T10:34:11,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:11,926 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:11,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:34:11,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:12,079 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:12,080 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-11-26T10:34:12,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:12,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 
as already flushing 2024-11-26T10:34:12,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:12,081 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:12,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:12,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:12,123 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=317 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/fea7dc851f4e41268934c8e99522082a 2024-11-26T10:34:12,129 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/52339b9cfca64ac1b99ac8cbb193a2f7 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/52339b9cfca64ac1b99ac8cbb193a2f7 2024-11-26T10:34:12,135 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/52339b9cfca64ac1b99ac8cbb193a2f7, entries=200, sequenceid=317, filesize=39.0 K 2024-11-26T10:34:12,137 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/44d766342e404fc2b0246ddfe987c2e5 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/44d766342e404fc2b0246ddfe987c2e5 2024-11-26T10:34:12,141 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/44d766342e404fc2b0246ddfe987c2e5, entries=150, sequenceid=317, filesize=12.0 K 2024-11-26T10:34:12,142 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/fea7dc851f4e41268934c8e99522082a as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/fea7dc851f4e41268934c8e99522082a 2024-11-26T10:34:12,146 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/fea7dc851f4e41268934c8e99522082a, entries=150, sequenceid=317, filesize=12.0 K 2024-11-26T10:34:12,147 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for e4978131eaad54a00cbbf3f245fd971c in 1695ms, sequenceid=317, compaction requested=true 2024-11-26T10:34:12,147 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e4978131eaad54a00cbbf3f245fd971c: 2024-11-26T10:34:12,147 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e4978131eaad54a00cbbf3f245fd971c:A, priority=-2147483648, current under compaction store size is 1 2024-11-26T10:34:12,147 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:34:12,147 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e4978131eaad54a00cbbf3f245fd971c:B, priority=-2147483648, current under compaction store size is 2 2024-11-26T10:34:12,147 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:34:12,147 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:34:12,147 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e4978131eaad54a00cbbf3f245fd971c:C, priority=-2147483648, current under compaction store size is 3 2024-11-26T10:34:12,147 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:34:12,147 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:34:12,148 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37517 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:34:12,148 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 
files of size 103073 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:34:12,149 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): e4978131eaad54a00cbbf3f245fd971c/A is initiating minor compaction (all files) 2024-11-26T10:34:12,149 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): e4978131eaad54a00cbbf3f245fd971c/B is initiating minor compaction (all files) 2024-11-26T10:34:12,149 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e4978131eaad54a00cbbf3f245fd971c/B in TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:12,149 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e4978131eaad54a00cbbf3f245fd971c/A in TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:12,149 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/16fb7206c53441e1b8a57d173ff58364, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/6410d18355314764bfb87c1b7ee5739e, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/44d766342e404fc2b0246ddfe987c2e5] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp, totalSize=36.6 K 2024-11-26T10:34:12,149 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/81a65d700e8b481baf099f00a56b88d7, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/faa41aad20d047c9a47f669c52e9a244, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/52339b9cfca64ac1b99ac8cbb193a2f7] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp, totalSize=100.7 K 2024-11-26T10:34:12,149 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:12,149 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 
files: [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/81a65d700e8b481baf099f00a56b88d7, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/faa41aad20d047c9a47f669c52e9a244, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/52339b9cfca64ac1b99ac8cbb193a2f7] 2024-11-26T10:34:12,149 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 16fb7206c53441e1b8a57d173ff58364, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732617246544 2024-11-26T10:34:12,149 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 81a65d700e8b481baf099f00a56b88d7, keycount=150, bloomtype=ROW, size=31.1 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732617246544 2024-11-26T10:34:12,149 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 6410d18355314764bfb87c1b7ee5739e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732617247663 2024-11-26T10:34:12,150 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting faa41aad20d047c9a47f669c52e9a244, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732617247663 2024-11-26T10:34:12,150 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 44d766342e404fc2b0246ddfe987c2e5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1732617249839 2024-11-26T10:34:12,150 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 52339b9cfca64ac1b99ac8cbb193a2f7, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1732617249839 2024-11-26T10:34:12,157 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e4978131eaad54a00cbbf3f245fd971c#B#compaction#194 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:34:12,158 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/9e9fcd86c5a841d9a51a516ff5c09d4e is 50, key is test_row_0/B:col10/1732617249839/Put/seqid=0 2024-11-26T10:34:12,160 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=e4978131eaad54a00cbbf3f245fd971c] 2024-11-26T10:34:12,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742057_1233 (size=13017) 2024-11-26T10:34:12,163 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112617ae859a033a473da207dba6c6399c76_e4978131eaad54a00cbbf3f245fd971c store=[table=TestAcidGuarantees family=A region=e4978131eaad54a00cbbf3f245fd971c] 2024-11-26T10:34:12,198 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112617ae859a033a473da207dba6c6399c76_e4978131eaad54a00cbbf3f245fd971c, store=[table=TestAcidGuarantees family=A region=e4978131eaad54a00cbbf3f245fd971c] 2024-11-26T10:34:12,198 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112617ae859a033a473da207dba6c6399c76_e4978131eaad54a00cbbf3f245fd971c because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=e4978131eaad54a00cbbf3f245fd971c] 2024-11-26T10:34:12,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742058_1234 (size=4469) 2024-11-26T10:34:12,237 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:12,237 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-11-26T10:34:12,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 
2024-11-26T10:34:12,237 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2837): Flushing e4978131eaad54a00cbbf3f245fd971c 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-26T10:34:12,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e4978131eaad54a00cbbf3f245fd971c, store=A 2024-11-26T10:34:12,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:12,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e4978131eaad54a00cbbf3f245fd971c, store=B 2024-11-26T10:34:12,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:12,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e4978131eaad54a00cbbf3f245fd971c, store=C 2024-11-26T10:34:12,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:12,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411262a3edeaff6b54e42a746bb8de7a4b1e5_e4978131eaad54a00cbbf3f245fd971c is 50, key is test_row_0/A:col10/1732617250460/Put/seqid=0 2024-11-26T10:34:12,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742059_1235 (size=12454) 2024-11-26T10:34:12,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-26T10:34:12,572 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/9e9fcd86c5a841d9a51a516ff5c09d4e as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/9e9fcd86c5a841d9a51a516ff5c09d4e 2024-11-26T10:34:12,581 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e4978131eaad54a00cbbf3f245fd971c/B of e4978131eaad54a00cbbf3f245fd971c into 9e9fcd86c5a841d9a51a516ff5c09d4e(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-26T10:34:12,581 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e4978131eaad54a00cbbf3f245fd971c: 2024-11-26T10:34:12,581 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c., storeName=e4978131eaad54a00cbbf3f245fd971c/B, priority=13, startTime=1732617252147; duration=0sec 2024-11-26T10:34:12,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:34:12,581 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:34:12,581 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e4978131eaad54a00cbbf3f245fd971c:B 2024-11-26T10:34:12,581 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. as already flushing 2024-11-26T10:34:12,581 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:34:12,581 DEBUG [Thread-718 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7362d978 to 127.0.0.1:61934 2024-11-26T10:34:12,581 DEBUG [Thread-718 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:34:12,583 DEBUG [Thread-724 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x19a533a3 to 127.0.0.1:61934 2024-11-26T10:34:12,583 DEBUG [Thread-724 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:34:12,583 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37517 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:34:12,584 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): e4978131eaad54a00cbbf3f245fd971c/C is initiating minor compaction (all files) 2024-11-26T10:34:12,584 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e4978131eaad54a00cbbf3f245fd971c/C in TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 
2024-11-26T10:34:12,584 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/fe3d62a7054d4696a9deafd739f3d84d, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/47662a4ea5994259a93d1c2223ced159, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/fea7dc851f4e41268934c8e99522082a] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp, totalSize=36.6 K 2024-11-26T10:34:12,585 DEBUG [Thread-722 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7ebda6ad to 127.0.0.1:61934 2024-11-26T10:34:12,585 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting fe3d62a7054d4696a9deafd739f3d84d, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732617246544 2024-11-26T10:34:12,585 DEBUG [Thread-722 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:34:12,585 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 47662a4ea5994259a93d1c2223ced159, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732617247663 2024-11-26T10:34:12,586 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting fea7dc851f4e41268934c8e99522082a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1732617249839 2024-11-26T10:34:12,587 DEBUG [Thread-720 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7bad2e85 to 127.0.0.1:61934 2024-11-26T10:34:12,587 DEBUG [Thread-720 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:34:12,593 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e4978131eaad54a00cbbf3f245fd971c#C#compaction#197 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:34:12,593 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/b4d4d64a8a5a4ce6a1a6e50ea9f6a1c1 is 50, key is test_row_0/C:col10/1732617249839/Put/seqid=0 2024-11-26T10:34:12,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742060_1236 (size=13017) 2024-11-26T10:34:12,603 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e4978131eaad54a00cbbf3f245fd971c#A#compaction#195 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:34:12,603 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/9f451554b7a9444ba7a840da1e8ea2dc is 175, key is test_row_0/A:col10/1732617249839/Put/seqid=0 2024-11-26T10:34:12,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742061_1237 (size=31971) 2024-11-26T10:34:12,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:34:12,668 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411262a3edeaff6b54e42a746bb8de7a4b1e5_e4978131eaad54a00cbbf3f245fd971c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411262a3edeaff6b54e42a746bb8de7a4b1e5_e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:34:12,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/694a2e2913e84dd3a10fcffd00a25778, store: [table=TestAcidGuarantees family=A region=e4978131eaad54a00cbbf3f245fd971c] 2024-11-26T10:34:12,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/694a2e2913e84dd3a10fcffd00a25778 is 175, key is test_row_0/A:col10/1732617250460/Put/seqid=0 2024-11-26T10:34:12,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742062_1238 (size=31255) 2024-11-26T10:34:13,008 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/b4d4d64a8a5a4ce6a1a6e50ea9f6a1c1 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/b4d4d64a8a5a4ce6a1a6e50ea9f6a1c1 2024-11-26T10:34:13,013 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/9f451554b7a9444ba7a840da1e8ea2dc as 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/9f451554b7a9444ba7a840da1e8ea2dc 2024-11-26T10:34:13,015 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e4978131eaad54a00cbbf3f245fd971c/C of e4978131eaad54a00cbbf3f245fd971c into b4d4d64a8a5a4ce6a1a6e50ea9f6a1c1(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:34:13,015 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e4978131eaad54a00cbbf3f245fd971c: 2024-11-26T10:34:13,015 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c., storeName=e4978131eaad54a00cbbf3f245fd971c/C, priority=13, startTime=1732617252147; duration=0sec 2024-11-26T10:34:13,015 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:34:13,015 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e4978131eaad54a00cbbf3f245fd971c:C 2024-11-26T10:34:13,019 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e4978131eaad54a00cbbf3f245fd971c/A of e4978131eaad54a00cbbf3f245fd971c into 9f451554b7a9444ba7a840da1e8ea2dc(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-26T10:34:13,019 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e4978131eaad54a00cbbf3f245fd971c: 2024-11-26T10:34:13,019 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c., storeName=e4978131eaad54a00cbbf3f245fd971c/A, priority=13, startTime=1732617252147; duration=0sec 2024-11-26T10:34:13,019 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:34:13,019 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e4978131eaad54a00cbbf3f245fd971c:A 2024-11-26T10:34:13,075 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=330, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/694a2e2913e84dd3a10fcffd00a25778 2024-11-26T10:34:13,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/b53e312f80fa48c1aeace69f0831795f is 50, key is test_row_0/B:col10/1732617250460/Put/seqid=0 2024-11-26T10:34:13,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742063_1239 (size=12301) 2024-11-26T10:34:13,486 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/b53e312f80fa48c1aeace69f0831795f 2024-11-26T10:34:13,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/64df83db0f974d75b612d5d6b4714e0c is 50, key is test_row_0/C:col10/1732617250460/Put/seqid=0 2024-11-26T10:34:13,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742064_1240 (size=12301) 2024-11-26T10:34:13,908 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/64df83db0f974d75b612d5d6b4714e0c 2024-11-26T10:34:13,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/694a2e2913e84dd3a10fcffd00a25778 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/694a2e2913e84dd3a10fcffd00a25778 2024-11-26T10:34:13,927 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/694a2e2913e84dd3a10fcffd00a25778, entries=150, sequenceid=330, filesize=30.5 K 2024-11-26T10:34:13,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/b53e312f80fa48c1aeace69f0831795f as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/b53e312f80fa48c1aeace69f0831795f 2024-11-26T10:34:13,932 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/b53e312f80fa48c1aeace69f0831795f, entries=150, sequenceid=330, filesize=12.0 K 2024-11-26T10:34:13,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/64df83db0f974d75b612d5d6b4714e0c as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/64df83db0f974d75b612d5d6b4714e0c 2024-11-26T10:34:13,936 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/64df83db0f974d75b612d5d6b4714e0c, entries=150, sequenceid=330, filesize=12.0 K 2024-11-26T10:34:13,936 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=26.84 KB/27480 for e4978131eaad54a00cbbf3f245fd971c in 1699ms, sequenceid=330, compaction requested=false 2024-11-26T10:34:13,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2538): Flush status journal for e4978131eaad54a00cbbf3f245fd971c: 2024-11-26T10:34:13,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 
2024-11-26T10:34:13,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=64 2024-11-26T10:34:13,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4106): Remote procedure done, pid=64 2024-11-26T10:34:13,938 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=64, resume processing ppid=63 2024-11-26T10:34:13,938 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, ppid=63, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.5570 sec 2024-11-26T10:34:13,939 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees in 3.5600 sec 2024-11-26T10:34:14,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-26T10:34:14,488 INFO [Thread-728 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 63 completed 2024-11-26T10:34:21,681 DEBUG [Thread-726 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x465dc764 to 127.0.0.1:61934 2024-11-26T10:34:21,681 DEBUG [Thread-726 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:34:21,681 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-11-26T10:34:21,682 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 54 2024-11-26T10:34:21,682 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 60 2024-11-26T10:34:21,682 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 68 2024-11-26T10:34:21,682 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 64 2024-11-26T10:34:21,682 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 19 2024-11-26T10:34:21,682 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-26T10:34:21,682 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7095 2024-11-26T10:34:21,682 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6903 2024-11-26T10:34:21,682 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-26T10:34:21,682 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2990 2024-11-26T10:34:21,682 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8970 rows 2024-11-26T10:34:21,682 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3004 2024-11-26T10:34:21,682 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9012 rows 2024-11-26T10:34:21,682 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-26T10:34:21,683 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x04506927 to 127.0.0.1:61934 2024-11-26T10:34:21,683 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:34:21,685 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-26T10:34:21,686 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-26T10:34:21,687 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=65, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-26T10:34:21,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-26T10:34:21,690 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732617261690"}]},"ts":"1732617261690"} 2024-11-26T10:34:21,691 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-26T10:34:21,778 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-26T10:34:21,780 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=66, ppid=65, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-26T10:34:21,783 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=67, ppid=66, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=e4978131eaad54a00cbbf3f245fd971c, UNASSIGN}] 2024-11-26T10:34:21,785 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=67, ppid=66, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=e4978131eaad54a00cbbf3f245fd971c, UNASSIGN 2024-11-26T10:34:21,787 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=67 updating hbase:meta row=e4978131eaad54a00cbbf3f245fd971c, regionState=CLOSING, regionLocation=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:21,789 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-26T10:34:21,789 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=68, ppid=67, state=RUNNABLE; CloseRegionProcedure e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877}] 2024-11-26T10:34:21,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-26T10:34:21,941 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:21,942 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] handler.UnassignRegionHandler(124): Close e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:34:21,942 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-26T10:34:21,943 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HRegion(1681): Closing e4978131eaad54a00cbbf3f245fd971c, disabling compactions & flushes 2024-11-26T10:34:21,943 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 
2024-11-26T10:34:21,943 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:21,943 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. after waiting 0 ms 2024-11-26T10:34:21,943 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:21,943 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HRegion(2837): Flushing e4978131eaad54a00cbbf3f245fd971c 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-26T10:34:21,944 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e4978131eaad54a00cbbf3f245fd971c, store=A 2024-11-26T10:34:21,944 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:21,944 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e4978131eaad54a00cbbf3f245fd971c, store=B 2024-11-26T10:34:21,944 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:21,944 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e4978131eaad54a00cbbf3f245fd971c, store=C 2024-11-26T10:34:21,944 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:21,955 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126f12605fe6d734a1a8a6801c3385b2a36_e4978131eaad54a00cbbf3f245fd971c is 50, key is test_row_0/A:col10/1732617261677/Put/seqid=0 2024-11-26T10:34:21,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742065_1241 (size=12454) 2024-11-26T10:34:21,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-26T10:34:22,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-26T10:34:22,362 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:34:22,370 INFO 
[RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126f12605fe6d734a1a8a6801c3385b2a36_e4978131eaad54a00cbbf3f245fd971c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126f12605fe6d734a1a8a6801c3385b2a36_e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:34:22,371 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/a834a6b543f1479da55e137f82441299, store: [table=TestAcidGuarantees family=A region=e4978131eaad54a00cbbf3f245fd971c] 2024-11-26T10:34:22,372 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/a834a6b543f1479da55e137f82441299 is 175, key is test_row_0/A:col10/1732617261677/Put/seqid=0 2024-11-26T10:34:22,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742066_1242 (size=31255) 2024-11-26T10:34:22,779 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=341, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/a834a6b543f1479da55e137f82441299 2024-11-26T10:34:22,792 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/18eb162b22c845d8ba3f04b2d46069a9 is 50, key is test_row_0/B:col10/1732617261677/Put/seqid=0 2024-11-26T10:34:22,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-26T10:34:22,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742067_1243 (size=12301) 2024-11-26T10:34:23,198 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=341 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/18eb162b22c845d8ba3f04b2d46069a9 2024-11-26T10:34:23,207 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/d4f5bb859eb1408e92e9911300a9aa2d is 50, key is test_row_0/C:col10/1732617261677/Put/seqid=0 2024-11-26T10:34:23,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742068_1244 (size=12301) 2024-11-26T10:34:23,613 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=341 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/d4f5bb859eb1408e92e9911300a9aa2d 2024-11-26T10:34:23,624 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/A/a834a6b543f1479da55e137f82441299 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/a834a6b543f1479da55e137f82441299 2024-11-26T10:34:23,630 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/a834a6b543f1479da55e137f82441299, entries=150, sequenceid=341, filesize=30.5 K 2024-11-26T10:34:23,631 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/B/18eb162b22c845d8ba3f04b2d46069a9 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/18eb162b22c845d8ba3f04b2d46069a9 2024-11-26T10:34:23,636 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/18eb162b22c845d8ba3f04b2d46069a9, entries=150, sequenceid=341, filesize=12.0 K 2024-11-26T10:34:23,638 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/.tmp/C/d4f5bb859eb1408e92e9911300a9aa2d as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/d4f5bb859eb1408e92e9911300a9aa2d 2024-11-26T10:34:23,642 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/d4f5bb859eb1408e92e9911300a9aa2d, entries=150, sequenceid=341, filesize=12.0 K 2024-11-26T10:34:23,644 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for e4978131eaad54a00cbbf3f245fd971c in 1700ms, sequenceid=341, compaction requested=true 2024-11-26T10:34:23,644 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/b145f03845ef403381eac3537e8aa2fa, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/fdef71fb80894a42b52a9d81c44ab3ce, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/b909f922f0874cc6990f6f8d17f32bfd, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/80002b85d97c4cd184681b8bbf1d2652, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/b0de51c241964eaa80ea35658051c73e, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/a6eff5e5ffe74f49a6869290558d4e5d, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/9aec94a5f6aa4946b283638bf9601143, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/148fffba1c924bc9b96ab17330fb353b, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/cf5d108abd0c44f1b4a3544fe25d5d84, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/2a982c27b305410083d2eefd5b532152, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/ee83770f41a949ce91620ef303ba209a, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/9251504322d348e48b5a18c2eb8cf628, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/71deba5982734d81b7c6d2a5d6c2c097, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/60ebdcaffaaa4c449a5180f3991dfb42, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/9dacb4355618493fa8a3842868ca1b2e, 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/ee7c356d4fa24d0482f39107b482ffd0, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/a7dca4d8044e4737ad980f9c3c65dd39, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/7046355acca04d9491d8bdd0f5bbbb60, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/81a65d700e8b481baf099f00a56b88d7, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/faa41aad20d047c9a47f669c52e9a244, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/52339b9cfca64ac1b99ac8cbb193a2f7] to archive 2024-11-26T10:34:23,645 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-26T10:34:23,648 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/b145f03845ef403381eac3537e8aa2fa to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/b145f03845ef403381eac3537e8aa2fa 2024-11-26T10:34:23,650 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/fdef71fb80894a42b52a9d81c44ab3ce to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/fdef71fb80894a42b52a9d81c44ab3ce 2024-11-26T10:34:23,652 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/b909f922f0874cc6990f6f8d17f32bfd to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/b909f922f0874cc6990f6f8d17f32bfd 2024-11-26T10:34:23,654 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/80002b85d97c4cd184681b8bbf1d2652 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/80002b85d97c4cd184681b8bbf1d2652 2024-11-26T10:34:23,656 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/b0de51c241964eaa80ea35658051c73e to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/b0de51c241964eaa80ea35658051c73e 2024-11-26T10:34:23,658 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/a6eff5e5ffe74f49a6869290558d4e5d to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/a6eff5e5ffe74f49a6869290558d4e5d 2024-11-26T10:34:23,661 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/9aec94a5f6aa4946b283638bf9601143 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/9aec94a5f6aa4946b283638bf9601143 2024-11-26T10:34:23,663 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/148fffba1c924bc9b96ab17330fb353b to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/148fffba1c924bc9b96ab17330fb353b 2024-11-26T10:34:23,665 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/cf5d108abd0c44f1b4a3544fe25d5d84 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/cf5d108abd0c44f1b4a3544fe25d5d84 2024-11-26T10:34:23,668 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/2a982c27b305410083d2eefd5b532152 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/2a982c27b305410083d2eefd5b532152 2024-11-26T10:34:23,670 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/ee83770f41a949ce91620ef303ba209a to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/ee83770f41a949ce91620ef303ba209a 2024-11-26T10:34:23,672 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/9251504322d348e48b5a18c2eb8cf628 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/9251504322d348e48b5a18c2eb8cf628 2024-11-26T10:34:23,674 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/71deba5982734d81b7c6d2a5d6c2c097 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/71deba5982734d81b7c6d2a5d6c2c097 2024-11-26T10:34:23,676 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/60ebdcaffaaa4c449a5180f3991dfb42 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/60ebdcaffaaa4c449a5180f3991dfb42 2024-11-26T10:34:23,677 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/9dacb4355618493fa8a3842868ca1b2e to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/9dacb4355618493fa8a3842868ca1b2e 2024-11-26T10:34:23,679 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/ee7c356d4fa24d0482f39107b482ffd0 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/ee7c356d4fa24d0482f39107b482ffd0 2024-11-26T10:34:23,680 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/a7dca4d8044e4737ad980f9c3c65dd39 to 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/a7dca4d8044e4737ad980f9c3c65dd39 2024-11-26T10:34:23,680 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/7046355acca04d9491d8bdd0f5bbbb60 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/7046355acca04d9491d8bdd0f5bbbb60 2024-11-26T10:34:23,681 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/81a65d700e8b481baf099f00a56b88d7 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/81a65d700e8b481baf099f00a56b88d7 2024-11-26T10:34:23,682 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/faa41aad20d047c9a47f669c52e9a244 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/faa41aad20d047c9a47f669c52e9a244 2024-11-26T10:34:23,683 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/52339b9cfca64ac1b99ac8cbb193a2f7 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/52339b9cfca64ac1b99ac8cbb193a2f7 2024-11-26T10:34:23,684 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/448d01fd6aba407c93fc3348ebb7813a, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/26ba8e1df3994313aff8833179e8a838, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/91dbf6950ccd4730a5ec2e2d0df045f2, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/10a097e263ce45b2aa647c139a50972a, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/11f13265fcf8471b9828912a8f6f27dd, 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/d187fe240026459b8c43c5bf1f2ffa13, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/cc1fdaddd8f444ab9981038305ea2898, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/281bcd856f554b7f8252aee4fb1728bd, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/7d02dec060ba48fd8bf1a6088c5ffe62, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/55d677eb7121427cb7b9e86e94c8bc5a, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/930d14809acf4053b8a110f2292861a4, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/ec002635141e4cdba0e42ac5acf7054f, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/91d5f5bfcf3f4cf484c3a589cb3a04b9, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/8488a1ab74994ccf87657112e891b42d, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/a3311c0642e54255b532c70922104aec, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/a71bae118bce4504965482ce0c506782, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/070f1a4812564ed5a09e5e2a25cda98c, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/16fb7206c53441e1b8a57d173ff58364, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/ae4b3554942c4cf686420dc159bdbd42, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/6410d18355314764bfb87c1b7ee5739e, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/44d766342e404fc2b0246ddfe987c2e5] to archive 2024-11-26T10:34:23,684 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
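Note: each HFileArchiver entry in this section follows one pattern: a compacted-away store file under data/default/TestAcidGuarantees/&lt;region&gt;/&lt;family&gt;/ is moved to the same relative path under archive/ in the HBase root directory. Below is a minimal sketch of that path mapping, using the root directory and one B-family file name taken from this log; it is illustrative only, and the real org.apache.hadoop.hbase.backup.HFileArchiver additionally handles failures, existing targets, and whole directory trees.

    // Illustrative only: maps a store file path to its archive location by inserting
    // "archive" under the HBase root dir, mirroring the moves reported by HFileArchiver.
    import org.apache.hadoop.fs.Path;

    public class ArchivePathExample {
        static Path toArchivePath(Path rootDir, Path storeFile) {
            // storeFile is expected to look like <rootDir>/data/<ns>/<table>/<region>/<family>/<file>
            String relative = storeFile.toUri().getPath()
                    .substring(rootDir.toUri().getPath().length() + 1); // e.g. data/default/...
            return new Path(new Path(rootDir, "archive"), relative);
        }

        public static void main(String[] args) {
            Path root = new Path("hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1");
            Path storeFile = new Path(root,
                "data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/448d01fd6aba407c93fc3348ebb7813a");
            // Prints .../archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/448d01fd6aba407c93fc3348ebb7813a
            System.out.println(toArchivePath(root, storeFile));
        }
    }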
2024-11-26T10:34:23,685 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/448d01fd6aba407c93fc3348ebb7813a to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/448d01fd6aba407c93fc3348ebb7813a 2024-11-26T10:34:23,686 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/26ba8e1df3994313aff8833179e8a838 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/26ba8e1df3994313aff8833179e8a838 2024-11-26T10:34:23,687 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/91dbf6950ccd4730a5ec2e2d0df045f2 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/91dbf6950ccd4730a5ec2e2d0df045f2 2024-11-26T10:34:23,688 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/10a097e263ce45b2aa647c139a50972a to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/10a097e263ce45b2aa647c139a50972a 2024-11-26T10:34:23,689 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/11f13265fcf8471b9828912a8f6f27dd to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/11f13265fcf8471b9828912a8f6f27dd 2024-11-26T10:34:23,690 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/d187fe240026459b8c43c5bf1f2ffa13 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/d187fe240026459b8c43c5bf1f2ffa13 2024-11-26T10:34:23,690 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/cc1fdaddd8f444ab9981038305ea2898 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/cc1fdaddd8f444ab9981038305ea2898 2024-11-26T10:34:23,691 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/281bcd856f554b7f8252aee4fb1728bd to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/281bcd856f554b7f8252aee4fb1728bd 2024-11-26T10:34:23,692 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/7d02dec060ba48fd8bf1a6088c5ffe62 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/7d02dec060ba48fd8bf1a6088c5ffe62 2024-11-26T10:34:23,692 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/55d677eb7121427cb7b9e86e94c8bc5a to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/55d677eb7121427cb7b9e86e94c8bc5a 2024-11-26T10:34:23,693 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/930d14809acf4053b8a110f2292861a4 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/930d14809acf4053b8a110f2292861a4 2024-11-26T10:34:23,694 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/ec002635141e4cdba0e42ac5acf7054f to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/ec002635141e4cdba0e42ac5acf7054f 2024-11-26T10:34:23,695 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/91d5f5bfcf3f4cf484c3a589cb3a04b9 to 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/91d5f5bfcf3f4cf484c3a589cb3a04b9 2024-11-26T10:34:23,696 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/8488a1ab74994ccf87657112e891b42d to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/8488a1ab74994ccf87657112e891b42d 2024-11-26T10:34:23,697 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/a3311c0642e54255b532c70922104aec to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/a3311c0642e54255b532c70922104aec 2024-11-26T10:34:23,698 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/a71bae118bce4504965482ce0c506782 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/a71bae118bce4504965482ce0c506782 2024-11-26T10:34:23,698 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/070f1a4812564ed5a09e5e2a25cda98c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/070f1a4812564ed5a09e5e2a25cda98c 2024-11-26T10:34:23,699 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/16fb7206c53441e1b8a57d173ff58364 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/16fb7206c53441e1b8a57d173ff58364 2024-11-26T10:34:23,700 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/ae4b3554942c4cf686420dc159bdbd42 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/ae4b3554942c4cf686420dc159bdbd42 2024-11-26T10:34:23,701 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/6410d18355314764bfb87c1b7ee5739e to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/6410d18355314764bfb87c1b7ee5739e 2024-11-26T10:34:23,701 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/44d766342e404fc2b0246ddfe987c2e5 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/44d766342e404fc2b0246ddfe987c2e5 2024-11-26T10:34:23,702 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/eb3d2bcf46cc4e15b7fa4102c8274a4c, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/2c55166e81d44f089c30fe6b35e4779f, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/3ea95a9d322e410cb4d1d9c9f673f952, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/c4f9dfbcdaef4acfad4a92172c4ee9fc, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/bd71bbb1d6b64227860ab2e719f2deeb, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/102cfada9c794ef98c75215745293b37, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/89de52412b874097acdd8f06d0dadc67, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/15e7c9e17e704540bc7e70ff580f31ef, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/b3d146cd81d944a090627b0e5bb0eaab, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/84929dc17505488a897f3d539a00a90a, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/2622a5ce74aa45318e235372ae636169, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/7afd49fbcac342ee953c88f889e6db9c, 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/38731f42dff248e28c1b4100d66e4c54, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/3ec5a8216b274a5fbbcfeb4c942057f5, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/050b5da4d1c34a84a81bef08c539f888, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/044e09c8ea8d4a3298990cd75dcb7f30, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/ca7ab498beb94abc973a17165bc9fc4e, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/fe3d62a7054d4696a9deafd739f3d84d, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/57aab9c06d094d438c06a0a84a56c8a2, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/47662a4ea5994259a93d1c2223ced159, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/fea7dc851f4e41268934c8e99522082a] to archive 2024-11-26T10:34:23,703 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-26T10:34:23,705 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/eb3d2bcf46cc4e15b7fa4102c8274a4c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/eb3d2bcf46cc4e15b7fa4102c8274a4c 2024-11-26T10:34:23,705 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/2c55166e81d44f089c30fe6b35e4779f to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/2c55166e81d44f089c30fe6b35e4779f 2024-11-26T10:34:23,706 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/3ea95a9d322e410cb4d1d9c9f673f952 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/3ea95a9d322e410cb4d1d9c9f673f952 2024-11-26T10:34:23,707 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/c4f9dfbcdaef4acfad4a92172c4ee9fc to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/c4f9dfbcdaef4acfad4a92172c4ee9fc 2024-11-26T10:34:23,709 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/bd71bbb1d6b64227860ab2e719f2deeb to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/bd71bbb1d6b64227860ab2e719f2deeb 2024-11-26T10:34:23,710 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/102cfada9c794ef98c75215745293b37 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/102cfada9c794ef98c75215745293b37 2024-11-26T10:34:23,711 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/89de52412b874097acdd8f06d0dadc67 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/89de52412b874097acdd8f06d0dadc67 2024-11-26T10:34:23,712 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/15e7c9e17e704540bc7e70ff580f31ef to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/15e7c9e17e704540bc7e70ff580f31ef 2024-11-26T10:34:23,713 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/b3d146cd81d944a090627b0e5bb0eaab to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/b3d146cd81d944a090627b0e5bb0eaab 2024-11-26T10:34:23,714 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/84929dc17505488a897f3d539a00a90a to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/84929dc17505488a897f3d539a00a90a 2024-11-26T10:34:23,715 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/2622a5ce74aa45318e235372ae636169 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/2622a5ce74aa45318e235372ae636169 2024-11-26T10:34:23,716 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/7afd49fbcac342ee953c88f889e6db9c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/7afd49fbcac342ee953c88f889e6db9c 2024-11-26T10:34:23,717 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/38731f42dff248e28c1b4100d66e4c54 to 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/38731f42dff248e28c1b4100d66e4c54 2024-11-26T10:34:23,718 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/3ec5a8216b274a5fbbcfeb4c942057f5 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/3ec5a8216b274a5fbbcfeb4c942057f5 2024-11-26T10:34:23,720 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/050b5da4d1c34a84a81bef08c539f888 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/050b5da4d1c34a84a81bef08c539f888 2024-11-26T10:34:23,721 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/044e09c8ea8d4a3298990cd75dcb7f30 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/044e09c8ea8d4a3298990cd75dcb7f30 2024-11-26T10:34:23,723 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/ca7ab498beb94abc973a17165bc9fc4e to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/ca7ab498beb94abc973a17165bc9fc4e 2024-11-26T10:34:23,725 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/fe3d62a7054d4696a9deafd739f3d84d to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/fe3d62a7054d4696a9deafd739f3d84d 2024-11-26T10:34:23,726 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/57aab9c06d094d438c06a0a84a56c8a2 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/57aab9c06d094d438c06a0a84a56c8a2 2024-11-26T10:34:23,728 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/47662a4ea5994259a93d1c2223ced159 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/47662a4ea5994259a93d1c2223ced159 2024-11-26T10:34:23,729 DEBUG [StoreCloser-TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/fea7dc851f4e41268934c8e99522082a to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/fea7dc851f4e41268934c8e99522082a 2024-11-26T10:34:23,733 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/recovered.edits/344.seqid, newMaxSeqId=344, maxSeqId=4 2024-11-26T10:34:23,733 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c. 2024-11-26T10:34:23,733 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] regionserver.HRegion(1635): Region close journal for e4978131eaad54a00cbbf3f245fd971c: 2024-11-26T10:34:23,735 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=68}] handler.UnassignRegionHandler(170): Closed e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:34:23,735 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=67 updating hbase:meta row=e4978131eaad54a00cbbf3f245fd971c, regionState=CLOSED 2024-11-26T10:34:23,737 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=68, resume processing ppid=67 2024-11-26T10:34:23,737 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, ppid=67, state=SUCCESS; CloseRegionProcedure e4978131eaad54a00cbbf3f245fd971c, server=ccf62758a0a5,45419,1732617185877 in 1.9470 sec 2024-11-26T10:34:23,738 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=67, resume processing ppid=66 2024-11-26T10:34:23,738 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, ppid=66, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=e4978131eaad54a00cbbf3f245fd971c, UNASSIGN in 1.9540 sec 2024-11-26T10:34:23,739 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=66, resume processing ppid=65 2024-11-26T10:34:23,739 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, ppid=65, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.9580 sec 2024-11-26T10:34:23,740 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732617263739"}]},"ts":"1732617263739"} 2024-11-26T10:34:23,740 INFO 
[PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-26T10:34:23,769 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-26T10:34:23,771 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 2.0840 sec 2024-11-26T10:34:23,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-26T10:34:23,796 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 65 completed 2024-11-26T10:34:23,796 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-26T10:34:23,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=69, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-26T10:34:23,797 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=69, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-26T10:34:23,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-26T10:34:23,798 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=69, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-26T10:34:23,801 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:34:23,804 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A, FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B, FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C, FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/recovered.edits] 2024-11-26T10:34:23,807 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/694a2e2913e84dd3a10fcffd00a25778 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/694a2e2913e84dd3a10fcffd00a25778 2024-11-26T10:34:23,809 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/9f451554b7a9444ba7a840da1e8ea2dc to 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/9f451554b7a9444ba7a840da1e8ea2dc 2024-11-26T10:34:23,810 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/a834a6b543f1479da55e137f82441299 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/A/a834a6b543f1479da55e137f82441299 2024-11-26T10:34:23,814 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/18eb162b22c845d8ba3f04b2d46069a9 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/18eb162b22c845d8ba3f04b2d46069a9 2024-11-26T10:34:23,815 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/9e9fcd86c5a841d9a51a516ff5c09d4e to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/9e9fcd86c5a841d9a51a516ff5c09d4e 2024-11-26T10:34:23,816 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/b53e312f80fa48c1aeace69f0831795f to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/B/b53e312f80fa48c1aeace69f0831795f 2024-11-26T10:34:23,818 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/64df83db0f974d75b612d5d6b4714e0c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/64df83db0f974d75b612d5d6b4714e0c 2024-11-26T10:34:23,819 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/b4d4d64a8a5a4ce6a1a6e50ea9f6a1c1 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/b4d4d64a8a5a4ce6a1a6e50ea9f6a1c1 2024-11-26T10:34:23,820 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/d4f5bb859eb1408e92e9911300a9aa2d to 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/C/d4f5bb859eb1408e92e9911300a9aa2d 2024-11-26T10:34:23,822 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/recovered.edits/344.seqid to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c/recovered.edits/344.seqid 2024-11-26T10:34:23,822 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:34:23,822 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-26T10:34:23,823 DEBUG [PEWorker-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-26T10:34:23,823 DEBUG [PEWorker-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-26T10:34:23,826 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126067ab4e266e5453da817efae0a9f1066_e4978131eaad54a00cbbf3f245fd971c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126067ab4e266e5453da817efae0a9f1066_e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:34:23,827 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411261434c621d0a84ce48b57c5d188746133_e4978131eaad54a00cbbf3f245fd971c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411261434c621d0a84ce48b57c5d188746133_e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:34:23,829 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411262a3edeaff6b54e42a746bb8de7a4b1e5_e4978131eaad54a00cbbf3f245fd971c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411262a3edeaff6b54e42a746bb8de7a4b1e5_e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:34:23,829 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112630e46ca48f0f4367b695fa67cda4268d_e4978131eaad54a00cbbf3f245fd971c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112630e46ca48f0f4367b695fa67cda4268d_e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:34:23,830 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112636a3f4b381a74d64a3a99929fd45ec57_e4978131eaad54a00cbbf3f245fd971c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112636a3f4b381a74d64a3a99929fd45ec57_e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:34:23,831 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112641bd1bef83a749138606883b48ac5270_e4978131eaad54a00cbbf3f245fd971c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112641bd1bef83a749138606883b48ac5270_e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:34:23,833 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112642dad63491f84c28b42d16882969e2e4_e4978131eaad54a00cbbf3f245fd971c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112642dad63491f84c28b42d16882969e2e4_e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:34:23,834 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126484c40836afb4930930a1402183c9fad_e4978131eaad54a00cbbf3f245fd971c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126484c40836afb4930930a1402183c9fad_e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:34:23,835 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411266954975f796340d8875276ae0aa662cd_e4978131eaad54a00cbbf3f245fd971c to 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411266954975f796340d8875276ae0aa662cd_e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:34:23,836 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411266969b1fa784148cfae5124d072d059e3_e4978131eaad54a00cbbf3f245fd971c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411266969b1fa784148cfae5124d072d059e3_e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:34:23,837 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411269c54f931833240039bbabfb246dc2553_e4978131eaad54a00cbbf3f245fd971c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411269c54f931833240039bbabfb246dc2553_e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:34:23,838 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126a67ae838f1954b13ad90d6642e2aac39_e4978131eaad54a00cbbf3f245fd971c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126a67ae838f1954b13ad90d6642e2aac39_e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:34:23,838 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126b9ce016f5c2e44d6a37ea9f0257db73d_e4978131eaad54a00cbbf3f245fd971c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126b9ce016f5c2e44d6a37ea9f0257db73d_e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:34:23,839 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126d685602c37f44399a243452efa8e288f_e4978131eaad54a00cbbf3f245fd971c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126d685602c37f44399a243452efa8e288f_e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:34:23,840 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126d9860ff85a8843e2a6abb1f21af3e1a9_e4978131eaad54a00cbbf3f245fd971c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126d9860ff85a8843e2a6abb1f21af3e1a9_e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:34:23,841 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126dc0812887aca43adbf541d28189a94a0_e4978131eaad54a00cbbf3f245fd971c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126dc0812887aca43adbf541d28189a94a0_e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:34:23,842 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126e4b042d1ee00420ebb034e2634a83b9c_e4978131eaad54a00cbbf3f245fd971c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126e4b042d1ee00420ebb034e2634a83b9c_e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:34:23,843 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126f12605fe6d734a1a8a6801c3385b2a36_e4978131eaad54a00cbbf3f245fd971c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126f12605fe6d734a1a8a6801c3385b2a36_e4978131eaad54a00cbbf3f245fd971c 2024-11-26T10:34:23,843 DEBUG [PEWorker-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-26T10:34:23,845 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=69, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-26T10:34:23,849 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-26T10:34:23,851 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-26T10:34:23,852 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=69, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-26T10:34:23,852 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 
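The HFileArchiver entries above move every store file and MOB file from the table's data directory to the matching path under the archive root, preserving the table/region/family layout. A small illustrative sketch (not part of the test itself) of inspecting that archive layout with the Hadoop FileSystem API, using the archive path for family C taken from the log:

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveLayoutSketch {
  public static void main(String[] args) throws Exception {
    // The archiver keeps the region/family layout under the archive root, so
    // .../data/default/<table>/<region>/<cf>/<hfile> ends up at
    // .../archive/data/default/<table>/<region>/<cf>/<hfile>.
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:44321"), conf);
    Path archivedFamily = new Path(
        "/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1"
            + "/archive/data/default/TestAcidGuarantees"
            + "/e4978131eaad54a00cbbf3f245fd971c/C");
    for (FileStatus status : fs.listStatus(archivedFamily)) {
      System.out.println(status.getPath().getName() + " " + status.getLen());
    }
  }
}
```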
2024-11-26T10:34:23,852 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732617263852"}]},"ts":"9223372036854775807"} 2024-11-26T10:34:23,854 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-26T10:34:23,854 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => e4978131eaad54a00cbbf3f245fd971c, NAME => 'TestAcidGuarantees,,1732617227269.e4978131eaad54a00cbbf3f245fd971c.', STARTKEY => '', ENDKEY => ''}] 2024-11-26T10:34:23,854 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-26T10:34:23,854 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732617263854"}]},"ts":"9223372036854775807"} 2024-11-26T10:34:23,856 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-26T10:34:23,862 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=69, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-26T10:34:23,862 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 66 msec 2024-11-26T10:34:23,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-26T10:34:23,899 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 69 completed 2024-11-26T10:34:23,907 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=237 (was 238), OpenFileDescriptor=449 (was 453), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=331 (was 320) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=5308 (was 5417) 2024-11-26T10:34:23,915 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=237, OpenFileDescriptor=449, MaxFileDescriptor=1048576, SystemLoadAverage=331, ProcessCount=11, AvailableMemoryMB=5308 2024-11-26T10:34:23,917 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
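With DisableTableProcedure (pid=65) and DeleteTableProcedure (pid=69) reported as completed, the test has dropped TestAcidGuarantees before recreating it for the next case (testGetAtomicity). A minimal sketch of that disable-then-delete sequence through the HBase 2.x Admin API; the class name and connection setup are illustrative, not the test's actual code:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      if (admin.tableExists(table) && admin.isTableEnabled(table)) {
        // disableTable blocks until the master's DisableTableProcedure finishes.
        admin.disableTable(table);
      }
      if (admin.tableExists(table)) {
        // deleteTable archives the region directories (the HFileArchiver
        // entries above) and removes the table's rows from hbase:meta.
        admin.deleteTable(table);
      }
    }
  }
}
```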
2024-11-26T10:34:23,917 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-26T10:34:23,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=70, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-26T10:34:23,918 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=70, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-26T10:34:23,919 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:34:23,919 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 70 2024-11-26T10:34:23,919 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=70, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-26T10:34:23,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=70 2024-11-26T10:34:23,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742069_1245 (size=963) 2024-11-26T10:34:24,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=70 2024-11-26T10:34:24,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=70 2024-11-26T10:34:24,329 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1 2024-11-26T10:34:24,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742070_1246 (size=53) 2024-11-26T10:34:24,340 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:34:24,340 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 0dac262a6c43b2828c5201e254d47204, disabling compactions & flushes 2024-11-26T10:34:24,341 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:24,341 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:24,341 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. after waiting 0 ms 2024-11-26T10:34:24,341 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:24,341 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:24,341 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 0dac262a6c43b2828c5201e254d47204: 2024-11-26T10:34:24,342 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=70, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-26T10:34:24,342 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732617264342"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732617264342"}]},"ts":"1732617264342"} 2024-11-26T10:34:24,343 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
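The descriptor logged in the create request above maps directly onto the TableDescriptorBuilder / ColumnFamilyDescriptorBuilder API. A sketch, assuming an Admin handle as in the previous example (the method and class names are illustrative); only the attributes that differ from defaults in the logged descriptor, the ADAPTIVE compacting-memstore type and VERSIONS = 1 on each family, are set explicitly:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateAdaptiveTableSketch {
  // Builds and creates a table equivalent to the descriptor logged above: the
  // ADAPTIVE in-memory compaction policy as a table-level attribute, plus
  // three column families A, B and C keeping one version each.
  static void createTestTable(Admin admin) throws IOException {
    TableDescriptorBuilder builder = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
    for (String family : new String[] {"A", "B", "C"}) {
      builder.setColumnFamily(
          ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
              .setMaxVersions(1)   // VERSIONS => '1' in the logged descriptor
              .build());
    }
    admin.createTable(builder.build()); // returns once CreateTableProcedure completes
  }
}
```

The table-level attribute is what later shows up per store as "memstore type=CompactingMemStore ... compactor=ADAPTIVE" when the region opens.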
2024-11-26T10:34:24,344 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=70, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-26T10:34:24,344 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732617264344"}]},"ts":"1732617264344"} 2024-11-26T10:34:24,345 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-26T10:34:24,387 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=71, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=0dac262a6c43b2828c5201e254d47204, ASSIGN}] 2024-11-26T10:34:24,388 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=71, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=0dac262a6c43b2828c5201e254d47204, ASSIGN 2024-11-26T10:34:24,390 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=71, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=0dac262a6c43b2828c5201e254d47204, ASSIGN; state=OFFLINE, location=ccf62758a0a5,45419,1732617185877; forceNewPlan=false, retain=false 2024-11-26T10:34:24,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=70 2024-11-26T10:34:24,541 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=71 updating hbase:meta row=0dac262a6c43b2828c5201e254d47204, regionState=OPENING, regionLocation=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:24,543 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE; OpenRegionProcedure 0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877}] 2024-11-26T10:34:24,697 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:24,704 INFO [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 
2024-11-26T10:34:24,705 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] regionserver.HRegion(7285): Opening region: {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} 2024-11-26T10:34:24,706 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 0dac262a6c43b2828c5201e254d47204 2024-11-26T10:34:24,706 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:34:24,706 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] regionserver.HRegion(7327): checking encryption for 0dac262a6c43b2828c5201e254d47204 2024-11-26T10:34:24,706 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] regionserver.HRegion(7330): checking classloading for 0dac262a6c43b2828c5201e254d47204 2024-11-26T10:34:24,708 INFO [StoreOpener-0dac262a6c43b2828c5201e254d47204-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 0dac262a6c43b2828c5201e254d47204 2024-11-26T10:34:24,709 INFO [StoreOpener-0dac262a6c43b2828c5201e254d47204-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-26T10:34:24,710 INFO [StoreOpener-0dac262a6c43b2828c5201e254d47204-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0dac262a6c43b2828c5201e254d47204 columnFamilyName A 2024-11-26T10:34:24,710 DEBUG [StoreOpener-0dac262a6c43b2828c5201e254d47204-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:34:24,710 INFO [StoreOpener-0dac262a6c43b2828c5201e254d47204-1 {}] regionserver.HStore(327): Store=0dac262a6c43b2828c5201e254d47204/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:34:24,711 INFO [StoreOpener-0dac262a6c43b2828c5201e254d47204-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 0dac262a6c43b2828c5201e254d47204 2024-11-26T10:34:24,712 INFO [StoreOpener-0dac262a6c43b2828c5201e254d47204-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-26T10:34:24,712 INFO [StoreOpener-0dac262a6c43b2828c5201e254d47204-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0dac262a6c43b2828c5201e254d47204 columnFamilyName B 2024-11-26T10:34:24,712 DEBUG [StoreOpener-0dac262a6c43b2828c5201e254d47204-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:34:24,713 INFO [StoreOpener-0dac262a6c43b2828c5201e254d47204-1 {}] regionserver.HStore(327): Store=0dac262a6c43b2828c5201e254d47204/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:34:24,713 INFO [StoreOpener-0dac262a6c43b2828c5201e254d47204-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 0dac262a6c43b2828c5201e254d47204 2024-11-26T10:34:24,714 INFO [StoreOpener-0dac262a6c43b2828c5201e254d47204-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-26T10:34:24,715 INFO [StoreOpener-0dac262a6c43b2828c5201e254d47204-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0dac262a6c43b2828c5201e254d47204 columnFamilyName C 2024-11-26T10:34:24,715 DEBUG [StoreOpener-0dac262a6c43b2828c5201e254d47204-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:34:24,715 INFO [StoreOpener-0dac262a6c43b2828c5201e254d47204-1 {}] regionserver.HStore(327): Store=0dac262a6c43b2828c5201e254d47204/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:34:24,715 INFO [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:24,716 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204 2024-11-26T10:34:24,717 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204 2024-11-26T10:34:24,719 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-26T10:34:24,721 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] regionserver.HRegion(1085): writing seq id for 0dac262a6c43b2828c5201e254d47204 2024-11-26T10:34:24,724 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-26T10:34:24,725 INFO [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] regionserver.HRegion(1102): Opened 0dac262a6c43b2828c5201e254d47204; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72432876, jitterRate=0.07933396100997925}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-26T10:34:24,726 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] regionserver.HRegion(1001): Region open journal for 0dac262a6c43b2828c5201e254d47204: 2024-11-26T10:34:24,726 INFO [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204., pid=72, masterSystemTime=1732617264696 2024-11-26T10:34:24,728 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:24,728 INFO [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=72}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 
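At this point region 0dac262a6c43b2828c5201e254d47204 is open on ccf62758a0a5,45419. The entries that follow show the test opening client connections, writing rows such as test_row_0 into families A/B/C, and requesting a table flush (FlushTableProcedure pid=73), with the region server eventually rejecting writes with RegionTooBusyException once the memstore limit is exceeded. A hedged sketch of the corresponding client-side calls; the row count, values, and method name are illustrative, not taken from the test:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class WriterAndFlushSketch {
  // Writes a few rows into families A/B/C and then asks the master to flush
  // the table, which is what drives the FlushTableProcedure seen further on.
  static void writeAndFlush(Connection conn, Admin admin) throws IOException {
    TableName name = TableName.valueOf("TestAcidGuarantees");
    try (Table table = conn.getTable(name)) {
      for (int i = 0; i < 10; i++) {
        Put put = new Put(Bytes.toBytes("test_row_" + i));
        for (String family : new String[] {"A", "B", "C"}) {
          // Qualifier "col10" mirrors the keys visible in the flush output
          // below (e.g. "test_row_0/A:col10/...").
          put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"),
              Bytes.toBytes("value-" + i));
        }
        table.put(put);
      }
    }
    // Under heavy write pressure the server may reject puts with
    // RegionTooBusyException ("Over memstore limit"), as logged below; the
    // client's normal retry handling is expected to back off and retry.
    admin.flush(name);
  }
}
```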
2024-11-26T10:34:24,729 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=71 updating hbase:meta row=0dac262a6c43b2828c5201e254d47204, regionState=OPEN, openSeqNum=2, regionLocation=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:24,732 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=72, resume processing ppid=71 2024-11-26T10:34:24,732 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, ppid=71, state=SUCCESS; OpenRegionProcedure 0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 in 187 msec 2024-11-26T10:34:24,734 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=71, resume processing ppid=70 2024-11-26T10:34:24,734 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, ppid=70, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=0dac262a6c43b2828c5201e254d47204, ASSIGN in 346 msec 2024-11-26T10:34:24,735 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=70, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-26T10:34:24,735 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732617264735"}]},"ts":"1732617264735"} 2024-11-26T10:34:24,736 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-26T10:34:24,797 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=70, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-26T10:34:24,800 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 880 msec 2024-11-26T10:34:25,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=70 2024-11-26T10:34:25,026 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 70 completed 2024-11-26T10:34:25,033 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x64dc42d9 to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@58341641 2024-11-26T10:34:25,045 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17b6adc5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:34:25,047 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:34:25,048 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60742, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:34:25,049 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-26T10:34:25,050 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44528, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-26T10:34:25,052 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3c1ac389 to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@44645c55 2024-11-26T10:34:25,062 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@669e1999, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:34:25,063 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x028e73c0 to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@64ee0130 2024-11-26T10:34:25,070 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72aa9ee5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:34:25,072 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7c480dfb to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@683b64c3 2024-11-26T10:34:25,079 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ec09297, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:34:25,081 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x34cb3991 to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7e55eb7 2024-11-26T10:34:25,093 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4dfb20f6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:34:25,094 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2e9ae050 to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3a703d2 2024-11-26T10:34:25,103 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17cf7fc0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:34:25,105 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x14ed1e44 to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@78b04266 2024-11-26T10:34:25,112 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5886c0f2, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:34:25,113 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x72537a47 to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@88aa519 2024-11-26T10:34:25,120 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66e575aa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:34:25,121 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x036642cb to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5e998dd3 2024-11-26T10:34:25,129 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@131ceb8f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:34:25,131 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3c299cfb to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2e4c79b8 2024-11-26T10:34:25,145 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a78bf6d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:34:25,146 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x605827c9 to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2d1403c3 2024-11-26T10:34:25,153 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@328852db, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:34:25,157 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-26T10:34:25,157 DEBUG [hconnection-0x57865412-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:34:25,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees 2024-11-26T10:34:25,158 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-26T10:34:25,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-26T10:34:25,159 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60746, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:34:25,159 DEBUG [hconnection-0x3e458b6a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:34:25,159 DEBUG [hconnection-0x189c9974-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:34:25,159 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-26T10:34:25,159 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=74, ppid=73, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-26T10:34:25,160 DEBUG [hconnection-0x10da2a7b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:34:25,160 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60758, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:34:25,161 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60772, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:34:25,161 DEBUG [hconnection-0x797a5b52-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:34:25,161 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60788, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:34:25,161 DEBUG [hconnection-0x596f9903-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:34:25,163 DEBUG [hconnection-0x776ab335-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:34:25,163 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60816, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:34:25,163 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60804, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:34:25,163 DEBUG [hconnection-0x4bd7fb55-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:34:25,164 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60826, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:34:25,165 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60836, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:34:25,165 DEBUG [hconnection-0x743f16ff-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication 
for service=ClientService, sasl=false 2024-11-26T10:34:25,165 DEBUG [hconnection-0x3205e829-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:34:25,166 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60852, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:34:25,166 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60866, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:34:25,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 0dac262a6c43b2828c5201e254d47204 2024-11-26T10:34:25,173 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0dac262a6c43b2828c5201e254d47204 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-26T10:34:25,173 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=A 2024-11-26T10:34:25,174 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:25,174 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=B 2024-11-26T10:34:25,174 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:25,174 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=C 2024-11-26T10:34:25,174 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:25,196 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/e752a79261844fb1abbe3090f9815526 is 50, key is test_row_0/A:col10/1732617265162/Put/seqid=0 2024-11-26T10:34:25,196 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:25,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617325192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:25,199 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:25,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617325194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:25,199 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:25,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60836 deadline: 1732617325194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:25,200 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:25,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617325197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:25,200 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:25,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617325195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:25,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742071_1247 (size=12001) 2024-11-26T10:34:25,217 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/e752a79261844fb1abbe3090f9815526 2024-11-26T10:34:25,252 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/b02b792efc90412ca2eec5701f42908e is 50, key is test_row_0/B:col10/1732617265162/Put/seqid=0 2024-11-26T10:34:25,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-26T10:34:25,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742072_1248 (size=12001) 2024-11-26T10:34:25,277 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/b02b792efc90412ca2eec5701f42908e 2024-11-26T10:34:25,299 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:25,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617325298, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:25,301 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:25,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617325300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:25,301 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:25,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60836 deadline: 1732617325300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:25,302 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:25,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617325301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:25,302 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:25,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617325301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:25,303 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/73b6d73b09624b9ca37be1f2109e233c is 50, key is test_row_0/C:col10/1732617265162/Put/seqid=0 2024-11-26T10:34:25,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742073_1249 (size=12001) 2024-11-26T10:34:25,307 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/73b6d73b09624b9ca37be1f2109e233c 2024-11-26T10:34:25,312 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/e752a79261844fb1abbe3090f9815526 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/e752a79261844fb1abbe3090f9815526 2024-11-26T10:34:25,314 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:25,315 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-26T10:34:25,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:25,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. as already flushing 2024-11-26T10:34:25,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 
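The entries above show a client-requested flush of TestAcidGuarantees being turned into FlushTableProcedure pid=73 with a FlushRegionProcedure subtask pid=74 that is dispatched to the region server. Below is a minimal sketch, not part of the captured log, of how such a flush is requested through the standard HBase 2.x Admin API; it assumes an hbase-site.xml with the cluster's ZooKeeper quorum is on the classpath, and only the table name is taken from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        // Assumes cluster connection settings (ZooKeeper quorum, etc.) are on the classpath.
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Asks the master to flush all regions of the table; the master runs a
            // flush-table procedure and dispatches per-region flush work to the
            // region servers, as seen in the pid=73 / pid=74 entries above.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}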
2024-11-26T10:34:25,315 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:25,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:25,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:25,320 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/e752a79261844fb1abbe3090f9815526, entries=150, sequenceid=15, filesize=11.7 K 2024-11-26T10:34:25,322 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/b02b792efc90412ca2eec5701f42908e as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/b02b792efc90412ca2eec5701f42908e 2024-11-26T10:34:25,332 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/b02b792efc90412ca2eec5701f42908e, entries=150, sequenceid=15, filesize=11.7 K 2024-11-26T10:34:25,334 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/73b6d73b09624b9ca37be1f2109e233c as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/73b6d73b09624b9ca37be1f2109e233c 2024-11-26T10:34:25,340 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/73b6d73b09624b9ca37be1f2109e233c, entries=150, sequenceid=15, filesize=11.7 K 
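The repeated RegionTooBusyException warnings are the region server's write backpressure: HRegion.checkResources rejects mutations while the region's memstore is over its blocking limit (512.0 K in this test) until the in-progress flush drains it. Below is a minimal client-side sketch, not part of the captured log, of a writer backing off and retrying; it assumes the same table and the A:col10 column seen in the row keys above, the value, retry count, and backoff are illustrative, and depending on client retry settings the exception may surface wrapped in a retries-exhausted error rather than directly.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            // Column family "A", qualifier "col10" as in the log; the value is illustrative.
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            long backoffMs = 100;
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);
                    break; // write accepted
                } catch (RegionTooBusyException e) {
                    // Region memstore is over its blocking limit (the "Over memstore limit"
                    // entries above); back off and let the running flush drain it.
                    if (attempt == 5) {
                        throw e; // give up after the last attempt
                    }
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;
                }
            }
        }
    }
}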
2024-11-26T10:34:25,341 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 0dac262a6c43b2828c5201e254d47204 in 168ms, sequenceid=15, compaction requested=false 2024-11-26T10:34:25,341 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0dac262a6c43b2828c5201e254d47204: 2024-11-26T10:34:25,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-26T10:34:25,467 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:25,467 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-26T10:34:25,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:25,468 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2837): Flushing 0dac262a6c43b2828c5201e254d47204 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-26T10:34:25,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=A 2024-11-26T10:34:25,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:25,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=B 2024-11-26T10:34:25,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:25,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=C 2024-11-26T10:34:25,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:25,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/0645dd61fed943efb816b6a299b6a823 is 50, key is test_row_0/A:col10/1732617265186/Put/seqid=0 2024-11-26T10:34:25,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742074_1250 (size=12001) 2024-11-26T10:34:25,478 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=37 
(bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/0645dd61fed943efb816b6a299b6a823 2024-11-26T10:34:25,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/c5e45555c5084477aff4b95d0f9386dd is 50, key is test_row_0/B:col10/1732617265186/Put/seqid=0 2024-11-26T10:34:25,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 0dac262a6c43b2828c5201e254d47204 2024-11-26T10:34:25,502 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. as already flushing 2024-11-26T10:34:25,510 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:25,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60836 deadline: 1732617325508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:25,510 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:25,510 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:25,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617325509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:25,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617325509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:25,511 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:25,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617325509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:25,511 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:25,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617325510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:25,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742075_1251 (size=12001) 2024-11-26T10:34:25,612 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:25,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60836 deadline: 1732617325611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:25,612 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:25,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617325611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:25,613 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:25,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617325611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:25,613 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:25,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617325611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:25,613 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:25,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617325612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:25,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-26T10:34:25,813 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:25,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60836 deadline: 1732617325813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:25,814 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:25,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617325813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:25,815 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:25,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617325813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:25,815 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:25,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617325814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:25,816 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-26T10:34:25,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617325815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877
2024-11-26T10:34:25,925 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/c5e45555c5084477aff4b95d0f9386dd
2024-11-26T10:34:25,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/25d8ec7aaa564c8fa87ece51631a5251 is 50, key is test_row_0/C:col10/1732617265186/Put/seqid=0
2024-11-26T10:34:25,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742076_1252 (size=12001)
2024-11-26T10:34:26,117 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:26,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60836 deadline: 1732617326115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:26,118 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:26,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617326115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:26,118 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:26,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617326116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:26,118 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:26,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617326116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:26,118 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:26,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617326116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:26,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-26T10:34:26,342 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/25d8ec7aaa564c8fa87ece51631a5251 2024-11-26T10:34:26,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/0645dd61fed943efb816b6a299b6a823 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/0645dd61fed943efb816b6a299b6a823 2024-11-26T10:34:26,349 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/0645dd61fed943efb816b6a299b6a823, entries=150, sequenceid=37, filesize=11.7 K 2024-11-26T10:34:26,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/c5e45555c5084477aff4b95d0f9386dd as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/c5e45555c5084477aff4b95d0f9386dd
2024-11-26T10:34:26,353 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/c5e45555c5084477aff4b95d0f9386dd, entries=150, sequenceid=37, filesize=11.7 K
2024-11-26T10:34:26,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/25d8ec7aaa564c8fa87ece51631a5251 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/25d8ec7aaa564c8fa87ece51631a5251
2024-11-26T10:34:26,358 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/25d8ec7aaa564c8fa87ece51631a5251, entries=150, sequenceid=37, filesize=11.7 K
2024-11-26T10:34:26,359 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 0dac262a6c43b2828c5201e254d47204 in 890ms, sequenceid=37, compaction requested=false
2024-11-26T10:34:26,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2538): Flush status journal for 0dac262a6c43b2828c5201e254d47204:
2024-11-26T10:34:26,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.
2024-11-26T10:34:26,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=74
2024-11-26T10:34:26,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4106): Remote procedure done, pid=74
2024-11-26T10:34:26,361 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=74, resume processing ppid=73
2024-11-26T10:34:26,361 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, ppid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2010 sec
2024-11-26T10:34:26,362 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees in 1.2040 sec
2024-11-26T10:34:26,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 0dac262a6c43b2828c5201e254d47204
2024-11-26T10:34:26,621 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0dac262a6c43b2828c5201e254d47204 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB
2024-11-26T10:34:26,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=A
2024-11-26T10:34:26,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-26T10:34:26,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=B
2024-11-26T10:34:26,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-26T10:34:26,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=C
2024-11-26T10:34:26,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-26T10:34:26,626 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/e70ce4ded90a44f199ccb855ede9dc94 is 50, key is test_row_0/A:col10/1732617265506/Put/seqid=0
2024-11-26T10:34:26,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742077_1253 (size=12001)
2024-11-26T10:34:26,639 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:26,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617326636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:26,640 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:26,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60836 deadline: 1732617326636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:26,640 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:26,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617326636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:26,640 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:26,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617326637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:26,640 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:26,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617326637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:26,742 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:26,742 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:26,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617326741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:26,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60836 deadline: 1732617326741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:26,742 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:26,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617326741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:26,743 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:26,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617326741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:26,743 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:26,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617326741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:26,944 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:26,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617326943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:26,945 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:26,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617326943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:26,945 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:26,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617326943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:26,945 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:26,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617326943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:26,945 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-26T10:34:26,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60836 deadline: 1732617326944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877
2024-11-26T10:34:27,033 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/e70ce4ded90a44f199ccb855ede9dc94
2024-11-26T10:34:27,039 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/97a6a910a5454523996bcfdfffc8df28 is 50, key is test_row_0/B:col10/1732617265506/Put/seqid=0
2024-11-26T10:34:27,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742078_1254 (size=12001)
2024-11-26T10:34:27,043 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/97a6a910a5454523996bcfdfffc8df28
2024-11-26T10:34:27,049 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/a94015a34c7344d58e655d3b0f745be6 is 50, key is test_row_0/C:col10/1732617265506/Put/seqid=0
2024-11-26T10:34:27,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742079_1255 (size=12001)
2024-11-26T10:34:27,246 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:27,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617327245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:27,246 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:27,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617327246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:27,247 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:27,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617327246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:27,248 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:27,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617327247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:27,249 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:27,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60836 deadline: 1732617327247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:27,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-26T10:34:27,262 INFO [Thread-1149 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 73 completed 2024-11-26T10:34:27,263 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-26T10:34:27,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees 2024-11-26T10:34:27,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-26T10:34:27,264 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-26T10:34:27,265 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-26T10:34:27,265 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-26T10:34:27,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-26T10:34:27,415 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:27,415 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-26T10:34:27,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 
2024-11-26T10:34:27,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. as already flushing 2024-11-26T10:34:27,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:27,416 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:27,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:34:27,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:34:27,460 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/a94015a34c7344d58e655d3b0f745be6 2024-11-26T10:34:27,464 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/e70ce4ded90a44f199ccb855ede9dc94 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/e70ce4ded90a44f199ccb855ede9dc94 2024-11-26T10:34:27,468 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/e70ce4ded90a44f199ccb855ede9dc94, entries=150, sequenceid=52, filesize=11.7 K 2024-11-26T10:34:27,468 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/97a6a910a5454523996bcfdfffc8df28 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/97a6a910a5454523996bcfdfffc8df28 2024-11-26T10:34:27,472 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/97a6a910a5454523996bcfdfffc8df28, entries=150, sequenceid=52, filesize=11.7 K 2024-11-26T10:34:27,473 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/a94015a34c7344d58e655d3b0f745be6 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/a94015a34c7344d58e655d3b0f745be6 2024-11-26T10:34:27,476 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/a94015a34c7344d58e655d3b0f745be6, entries=150, sequenceid=52, filesize=11.7 K 2024-11-26T10:34:27,477 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 0dac262a6c43b2828c5201e254d47204 in 856ms, sequenceid=52, compaction requested=true 2024-11-26T10:34:27,477 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0dac262a6c43b2828c5201e254d47204: 2024-11-26T10:34:27,477 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0dac262a6c43b2828c5201e254d47204:A, priority=-2147483648, current under compaction store size is 1 2024-11-26T10:34:27,477 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:34:27,477 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0dac262a6c43b2828c5201e254d47204:B, priority=-2147483648, current under compaction store size is 2 2024-11-26T10:34:27,477 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:34:27,477 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:34:27,477 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:34:27,477 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0dac262a6c43b2828c5201e254d47204:C, priority=-2147483648, current under compaction store size is 3 2024-11-26T10:34:27,477 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:34:27,478 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:34:27,478 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): 0dac262a6c43b2828c5201e254d47204/B is initiating minor compaction (all files) 2024-11-26T10:34:27,478 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0dac262a6c43b2828c5201e254d47204/B in TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:27,479 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/b02b792efc90412ca2eec5701f42908e, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/c5e45555c5084477aff4b95d0f9386dd, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/97a6a910a5454523996bcfdfffc8df28] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp, totalSize=35.2 K 2024-11-26T10:34:27,479 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:34:27,479 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): 0dac262a6c43b2828c5201e254d47204/A is initiating minor compaction (all files) 2024-11-26T10:34:27,479 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0dac262a6c43b2828c5201e254d47204/A in TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 
2024-11-26T10:34:27,479 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting b02b792efc90412ca2eec5701f42908e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732617265162 2024-11-26T10:34:27,479 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/e752a79261844fb1abbe3090f9815526, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/0645dd61fed943efb816b6a299b6a823, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/e70ce4ded90a44f199ccb855ede9dc94] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp, totalSize=35.2 K 2024-11-26T10:34:27,479 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting c5e45555c5084477aff4b95d0f9386dd, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732617265186 2024-11-26T10:34:27,479 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting e752a79261844fb1abbe3090f9815526, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732617265162 2024-11-26T10:34:27,480 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 97a6a910a5454523996bcfdfffc8df28, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732617265506 2024-11-26T10:34:27,480 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0645dd61fed943efb816b6a299b6a823, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732617265186 2024-11-26T10:34:27,480 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting e70ce4ded90a44f199ccb855ede9dc94, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732617265506 2024-11-26T10:34:27,487 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0dac262a6c43b2828c5201e254d47204#B#compaction#212 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:34:27,487 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/c87320d8514b48acac59a3a327719906 is 50, key is test_row_0/B:col10/1732617265506/Put/seqid=0 2024-11-26T10:34:27,487 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0dac262a6c43b2828c5201e254d47204#A#compaction#213 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:34:27,487 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/1b5f5a45933e400985c30dcd14ccdc89 is 50, key is test_row_0/A:col10/1732617265506/Put/seqid=0 2024-11-26T10:34:27,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742081_1257 (size=12104) 2024-11-26T10:34:27,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742080_1256 (size=12104) 2024-11-26T10:34:27,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-26T10:34:27,567 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:27,567 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-26T10:34:27,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:27,568 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2837): Flushing 0dac262a6c43b2828c5201e254d47204 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-26T10:34:27,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=A 2024-11-26T10:34:27,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:27,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=B 2024-11-26T10:34:27,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:27,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=C 2024-11-26T10:34:27,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:27,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/75305929460747cd8c99e5b3b0de0a06 is 50, key is test_row_0/A:col10/1732617266636/Put/seqid=0 
2024-11-26T10:34:27,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742082_1258 (size=12001) 2024-11-26T10:34:27,582 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=73 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/75305929460747cd8c99e5b3b0de0a06 2024-11-26T10:34:27,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/4b1a36e8168a4d09925b765b68577969 is 50, key is test_row_0/B:col10/1732617266636/Put/seqid=0 2024-11-26T10:34:27,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742083_1259 (size=12001) 2024-11-26T10:34:27,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 0dac262a6c43b2828c5201e254d47204 2024-11-26T10:34:27,748 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. as already flushing 2024-11-26T10:34:27,787 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:27,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617327784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:27,790 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:27,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617327786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:27,790 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:27,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60836 deadline: 1732617327787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:27,791 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:27,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617327787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:27,791 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:27,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617327787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:27,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-26T10:34:27,890 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:27,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617327888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:27,891 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:27,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617327890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:27,891 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:27,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60836 deadline: 1732617327891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:27,896 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:27,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617327895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:27,896 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:27,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617327895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:27,918 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/1b5f5a45933e400985c30dcd14ccdc89 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/1b5f5a45933e400985c30dcd14ccdc89 2024-11-26T10:34:27,920 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/c87320d8514b48acac59a3a327719906 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/c87320d8514b48acac59a3a327719906 2024-11-26T10:34:27,923 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0dac262a6c43b2828c5201e254d47204/A of 0dac262a6c43b2828c5201e254d47204 into 1b5f5a45933e400985c30dcd14ccdc89(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-26T10:34:27,923 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0dac262a6c43b2828c5201e254d47204: 2024-11-26T10:34:27,923 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204., storeName=0dac262a6c43b2828c5201e254d47204/A, priority=13, startTime=1732617267477; duration=0sec 2024-11-26T10:34:27,923 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:34:27,923 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0dac262a6c43b2828c5201e254d47204:A 2024-11-26T10:34:27,923 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:34:27,925 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:34:27,925 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): 0dac262a6c43b2828c5201e254d47204/C is initiating minor compaction (all files) 2024-11-26T10:34:27,925 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0dac262a6c43b2828c5201e254d47204/C in TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:27,925 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/73b6d73b09624b9ca37be1f2109e233c, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/25d8ec7aaa564c8fa87ece51631a5251, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/a94015a34c7344d58e655d3b0f745be6] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp, totalSize=35.2 K 2024-11-26T10:34:27,926 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 73b6d73b09624b9ca37be1f2109e233c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732617265162 2024-11-26T10:34:27,926 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 25d8ec7aaa564c8fa87ece51631a5251, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732617265186 2024-11-26T10:34:27,926 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting a94015a34c7344d58e655d3b0f745be6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732617265506 2024-11-26T10:34:27,929 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction 
of 3 (all) file(s) in 0dac262a6c43b2828c5201e254d47204/B of 0dac262a6c43b2828c5201e254d47204 into c87320d8514b48acac59a3a327719906(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:34:27,929 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0dac262a6c43b2828c5201e254d47204: 2024-11-26T10:34:27,929 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204., storeName=0dac262a6c43b2828c5201e254d47204/B, priority=13, startTime=1732617267477; duration=0sec 2024-11-26T10:34:27,929 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:34:27,929 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0dac262a6c43b2828c5201e254d47204:B 2024-11-26T10:34:27,949 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0dac262a6c43b2828c5201e254d47204#C#compaction#216 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:34:27,949 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/979ee34cf5f64c37a4c14a5d2f99253a is 50, key is test_row_0/C:col10/1732617265506/Put/seqid=0 2024-11-26T10:34:27,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742084_1260 (size=12104) 2024-11-26T10:34:28,005 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=73 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/4b1a36e8168a4d09925b765b68577969 2024-11-26T10:34:28,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/eb3e7aaa7ab0455c85cd25dac8763227 is 50, key is test_row_0/C:col10/1732617266636/Put/seqid=0 2024-11-26T10:34:28,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742085_1261 (size=12001) 2024-11-26T10:34:28,018 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=73 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/eb3e7aaa7ab0455c85cd25dac8763227 2024-11-26T10:34:28,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 
{event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/75305929460747cd8c99e5b3b0de0a06 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/75305929460747cd8c99e5b3b0de0a06 2024-11-26T10:34:28,026 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/75305929460747cd8c99e5b3b0de0a06, entries=150, sequenceid=73, filesize=11.7 K 2024-11-26T10:34:28,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/4b1a36e8168a4d09925b765b68577969 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/4b1a36e8168a4d09925b765b68577969 2024-11-26T10:34:28,031 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/4b1a36e8168a4d09925b765b68577969, entries=150, sequenceid=73, filesize=11.7 K 2024-11-26T10:34:28,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/eb3e7aaa7ab0455c85cd25dac8763227 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/eb3e7aaa7ab0455c85cd25dac8763227 2024-11-26T10:34:28,036 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/eb3e7aaa7ab0455c85cd25dac8763227, entries=150, sequenceid=73, filesize=11.7 K 2024-11-26T10:34:28,036 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=87.22 KB/89310 for 0dac262a6c43b2828c5201e254d47204 in 468ms, sequenceid=73, compaction requested=false 2024-11-26T10:34:28,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2538): Flush status journal for 0dac262a6c43b2828c5201e254d47204: 2024-11-26T10:34:28,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 
2024-11-26T10:34:28,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=76 2024-11-26T10:34:28,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4106): Remote procedure done, pid=76 2024-11-26T10:34:28,038 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=76, resume processing ppid=75 2024-11-26T10:34:28,038 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 772 msec 2024-11-26T10:34:28,039 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees in 775 msec 2024-11-26T10:34:28,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 0dac262a6c43b2828c5201e254d47204 2024-11-26T10:34:28,094 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0dac262a6c43b2828c5201e254d47204 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-26T10:34:28,095 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=A 2024-11-26T10:34:28,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:28,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=B 2024-11-26T10:34:28,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:28,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=C 2024-11-26T10:34:28,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:28,100 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/feff1918ff694adfa916cc414f8f22de is 50, key is test_row_0/A:col10/1732617268093/Put/seqid=0 2024-11-26T10:34:28,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742086_1262 (size=12001) 2024-11-26T10:34:28,105 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/feff1918ff694adfa916cc414f8f22de 2024-11-26T10:34:28,114 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/ae56c432499e4456852d0248fbd22c04 is 50, key is test_row_0/B:col10/1732617268093/Put/seqid=0 2024-11-26T10:34:28,114 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size 
limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:28,114 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:28,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617328110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:28,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617328110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:28,115 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:28,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617328111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:28,117 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:28,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60836 deadline: 1732617328115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:28,117 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:28,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617328115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:28,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742087_1263 (size=12001) 2024-11-26T10:34:28,128 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/ae56c432499e4456852d0248fbd22c04 2024-11-26T10:34:28,146 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/0359b2a6214a4ec598d86ab425a95ef7 is 50, key is test_row_0/C:col10/1732617268093/Put/seqid=0 2024-11-26T10:34:28,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742088_1264 (size=12001) 2024-11-26T10:34:28,161 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/0359b2a6214a4ec598d86ab425a95ef7 2024-11-26T10:34:28,166 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/feff1918ff694adfa916cc414f8f22de as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/feff1918ff694adfa916cc414f8f22de 2024-11-26T10:34:28,171 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/feff1918ff694adfa916cc414f8f22de, entries=150, sequenceid=94, filesize=11.7 K 2024-11-26T10:34:28,171 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/ae56c432499e4456852d0248fbd22c04 as 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/ae56c432499e4456852d0248fbd22c04 2024-11-26T10:34:28,175 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/ae56c432499e4456852d0248fbd22c04, entries=150, sequenceid=94, filesize=11.7 K 2024-11-26T10:34:28,176 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/0359b2a6214a4ec598d86ab425a95ef7 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/0359b2a6214a4ec598d86ab425a95ef7 2024-11-26T10:34:28,180 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/0359b2a6214a4ec598d86ab425a95ef7, entries=150, sequenceid=94, filesize=11.7 K 2024-11-26T10:34:28,181 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 0dac262a6c43b2828c5201e254d47204 in 87ms, sequenceid=94, compaction requested=true 2024-11-26T10:34:28,181 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0dac262a6c43b2828c5201e254d47204: 2024-11-26T10:34:28,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0dac262a6c43b2828c5201e254d47204:A, priority=-2147483648, current under compaction store size is 2 2024-11-26T10:34:28,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:34:28,181 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:34:28,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0dac262a6c43b2828c5201e254d47204:B, priority=-2147483648, current under compaction store size is 3 2024-11-26T10:34:28,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:34:28,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0dac262a6c43b2828c5201e254d47204:C, priority=-2147483648, current under compaction store size is 3 2024-11-26T10:34:28,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-26T10:34:28,182 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:34:28,183 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] 
regionserver.HStore(1540): 0dac262a6c43b2828c5201e254d47204/A is initiating minor compaction (all files) 2024-11-26T10:34:28,183 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0dac262a6c43b2828c5201e254d47204/A in TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:28,183 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/1b5f5a45933e400985c30dcd14ccdc89, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/75305929460747cd8c99e5b3b0de0a06, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/feff1918ff694adfa916cc414f8f22de] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp, totalSize=35.3 K 2024-11-26T10:34:28,183 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 1b5f5a45933e400985c30dcd14ccdc89, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732617265506 2024-11-26T10:34:28,183 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 75305929460747cd8c99e5b3b0de0a06, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=73, earliestPutTs=1732617266636 2024-11-26T10:34:28,183 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting feff1918ff694adfa916cc414f8f22de, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732617267786 2024-11-26T10:34:28,191 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0dac262a6c43b2828c5201e254d47204#A#compaction#221 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:34:28,191 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/0f2bbab550ed47efb91b34fc0d24adad is 50, key is test_row_0/A:col10/1732617268093/Put/seqid=0 2024-11-26T10:34:28,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742089_1265 (size=12207) 2024-11-26T10:34:28,200 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/0f2bbab550ed47efb91b34fc0d24adad as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/0f2bbab550ed47efb91b34fc0d24adad 2024-11-26T10:34:28,205 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0dac262a6c43b2828c5201e254d47204/A of 0dac262a6c43b2828c5201e254d47204 into 0f2bbab550ed47efb91b34fc0d24adad(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:34:28,205 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0dac262a6c43b2828c5201e254d47204: 2024-11-26T10:34:28,205 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204., storeName=0dac262a6c43b2828c5201e254d47204/A, priority=13, startTime=1732617268181; duration=0sec 2024-11-26T10:34:28,205 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-26T10:34:28,205 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0dac262a6c43b2828c5201e254d47204:A 2024-11-26T10:34:28,205 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 3 compacting, 2 eligible, 16 blocking 2024-11-26T10:34:28,206 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-26T10:34:28,206 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-26T10:34:28,206 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 
because compaction request was cancelled 2024-11-26T10:34:28,206 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0dac262a6c43b2828c5201e254d47204:C 2024-11-26T10:34:28,206 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:34:28,207 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:34:28,207 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): 0dac262a6c43b2828c5201e254d47204/B is initiating minor compaction (all files) 2024-11-26T10:34:28,207 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0dac262a6c43b2828c5201e254d47204/B in TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:28,208 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/c87320d8514b48acac59a3a327719906, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/4b1a36e8168a4d09925b765b68577969, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/ae56c432499e4456852d0248fbd22c04] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp, totalSize=35.3 K 2024-11-26T10:34:28,208 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting c87320d8514b48acac59a3a327719906, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732617265506 2024-11-26T10:34:28,208 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 4b1a36e8168a4d09925b765b68577969, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=73, earliestPutTs=1732617266636 2024-11-26T10:34:28,209 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting ae56c432499e4456852d0248fbd22c04, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732617267786 2024-11-26T10:34:28,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 0dac262a6c43b2828c5201e254d47204 2024-11-26T10:34:28,218 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0dac262a6c43b2828c5201e254d47204#B#compaction#222 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:34:28,218 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0dac262a6c43b2828c5201e254d47204 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-26T10:34:28,218 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=A 2024-11-26T10:34:28,218 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:28,218 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=B 2024-11-26T10:34:28,218 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:28,218 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=C 2024-11-26T10:34:28,219 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:28,219 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/2b10ef81bd454e02ac96197d8bbcfb02 is 50, key is test_row_0/B:col10/1732617268093/Put/seqid=0 2024-11-26T10:34:28,232 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/6843f35503e249f0810f11ca3ea930cf is 50, key is test_row_0/A:col10/1732617268217/Put/seqid=0 2024-11-26T10:34:28,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742090_1266 (size=12207) 2024-11-26T10:34:28,233 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:28,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617328228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:28,234 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:28,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617328230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:28,236 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:28,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617328231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:28,237 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:28,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60836 deadline: 1732617328233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:28,237 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:28,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617328234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:28,238 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/2b10ef81bd454e02ac96197d8bbcfb02 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/2b10ef81bd454e02ac96197d8bbcfb02 2024-11-26T10:34:28,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742091_1267 (size=12001) 2024-11-26T10:34:28,242 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=113 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/6843f35503e249f0810f11ca3ea930cf 2024-11-26T10:34:28,245 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0dac262a6c43b2828c5201e254d47204/B of 0dac262a6c43b2828c5201e254d47204 into 2b10ef81bd454e02ac96197d8bbcfb02(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-26T10:34:28,245 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0dac262a6c43b2828c5201e254d47204: 2024-11-26T10:34:28,245 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204., storeName=0dac262a6c43b2828c5201e254d47204/B, priority=13, startTime=1732617268181; duration=0sec 2024-11-26T10:34:28,245 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:34:28,245 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0dac262a6c43b2828c5201e254d47204:B 2024-11-26T10:34:28,250 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/3eed9cd449e7466b8abe3702e77214ad is 50, key is test_row_0/B:col10/1732617268217/Put/seqid=0 2024-11-26T10:34:28,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742092_1268 (size=12001) 2024-11-26T10:34:28,335 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:28,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617328334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:28,337 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:28,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617328335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:28,339 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:28,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617328337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:28,339 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:28,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60836 deadline: 1732617328337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:28,339 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:28,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617328338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:28,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-26T10:34:28,367 INFO [Thread-1149 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 75 completed 2024-11-26T10:34:28,367 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-26T10:34:28,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees 2024-11-26T10:34:28,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-26T10:34:28,368 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-26T10:34:28,369 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-26T10:34:28,369 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=77, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-26T10:34:28,373 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/979ee34cf5f64c37a4c14a5d2f99253a as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/979ee34cf5f64c37a4c14a5d2f99253a 
2024-11-26T10:34:28,377 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0dac262a6c43b2828c5201e254d47204/C of 0dac262a6c43b2828c5201e254d47204 into 979ee34cf5f64c37a4c14a5d2f99253a(size=11.8 K), total size for store is 35.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:34:28,377 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0dac262a6c43b2828c5201e254d47204: 2024-11-26T10:34:28,377 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204., storeName=0dac262a6c43b2828c5201e254d47204/C, priority=13, startTime=1732617267477; duration=0sec 2024-11-26T10:34:28,377 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:34:28,377 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0dac262a6c43b2828c5201e254d47204:C 2024-11-26T10:34:28,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-26T10:34:28,520 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:28,520 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-26T10:34:28,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:28,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. as already flushing 2024-11-26T10:34:28,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:28,521 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:28,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:28,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:28,537 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:28,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617328537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:28,539 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:28,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617328538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:28,541 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:28,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617328540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:28,541 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:28,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617328541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:28,541 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:28,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60836 deadline: 1732617328541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:28,654 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=113 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/3eed9cd449e7466b8abe3702e77214ad 2024-11-26T10:34:28,660 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/2c728f3209a64be897ffef1a7e29bc85 is 50, key is test_row_0/C:col10/1732617268217/Put/seqid=0 2024-11-26T10:34:28,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742093_1269 (size=12001) 2024-11-26T10:34:28,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-26T10:34:28,672 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:28,673 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-26T10:34:28,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:28,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. as already flushing 2024-11-26T10:34:28,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 
2024-11-26T10:34:28,673 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:28,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:28,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:28,824 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:28,825 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-26T10:34:28,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:28,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. as already flushing 2024-11-26T10:34:28,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:28,825 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:28,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:28,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:28,839 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:28,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617328838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:28,841 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:28,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617328841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:28,843 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:28,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617328842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:28,844 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:28,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617328842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:28,846 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:28,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60836 deadline: 1732617328844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:28,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-26T10:34:28,977 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:28,977 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-26T10:34:28,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:28,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. as already flushing 2024-11-26T10:34:28,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:28,977 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:28,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:28,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:29,064 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=113 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/2c728f3209a64be897ffef1a7e29bc85 2024-11-26T10:34:29,068 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/6843f35503e249f0810f11ca3ea930cf as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/6843f35503e249f0810f11ca3ea930cf 2024-11-26T10:34:29,072 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/6843f35503e249f0810f11ca3ea930cf, entries=150, sequenceid=113, filesize=11.7 K 2024-11-26T10:34:29,072 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/3eed9cd449e7466b8abe3702e77214ad as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/3eed9cd449e7466b8abe3702e77214ad 2024-11-26T10:34:29,076 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/3eed9cd449e7466b8abe3702e77214ad, entries=150, 
sequenceid=113, filesize=11.7 K 2024-11-26T10:34:29,076 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/2c728f3209a64be897ffef1a7e29bc85 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/2c728f3209a64be897ffef1a7e29bc85 2024-11-26T10:34:29,079 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/2c728f3209a64be897ffef1a7e29bc85, entries=150, sequenceid=113, filesize=11.7 K 2024-11-26T10:34:29,080 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=107.34 KB/109920 for 0dac262a6c43b2828c5201e254d47204 in 862ms, sequenceid=113, compaction requested=true 2024-11-26T10:34:29,080 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0dac262a6c43b2828c5201e254d47204: 2024-11-26T10:34:29,080 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0dac262a6c43b2828c5201e254d47204:A, priority=-2147483648, current under compaction store size is 1 2024-11-26T10:34:29,080 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:34:29,080 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0dac262a6c43b2828c5201e254d47204:B, priority=-2147483648, current under compaction store size is 2 2024-11-26T10:34:29,080 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-26T10:34:29,080 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:34:29,080 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-26T10:34:29,080 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0dac262a6c43b2828c5201e254d47204:C, priority=-2147483648, current under compaction store size is 3 2024-11-26T10:34:29,080 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:34:29,081 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-26T10:34:29,081 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-26T10:34:29,081 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] 
compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-26T10:34:29,081 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-26T10:34:29,081 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. because compaction request was cancelled 2024-11-26T10:34:29,081 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. because compaction request was cancelled 2024-11-26T10:34:29,081 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0dac262a6c43b2828c5201e254d47204:A 2024-11-26T10:34:29,081 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0dac262a6c43b2828c5201e254d47204:B 2024-11-26T10:34:29,081 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-26T10:34:29,082 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-26T10:34:29,082 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): 0dac262a6c43b2828c5201e254d47204/C is initiating minor compaction (all files) 2024-11-26T10:34:29,082 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0dac262a6c43b2828c5201e254d47204/C in TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 
2024-11-26T10:34:29,082 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/979ee34cf5f64c37a4c14a5d2f99253a, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/eb3e7aaa7ab0455c85cd25dac8763227, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/0359b2a6214a4ec598d86ab425a95ef7, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/2c728f3209a64be897ffef1a7e29bc85] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp, totalSize=47.0 K 2024-11-26T10:34:29,084 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 979ee34cf5f64c37a4c14a5d2f99253a, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732617265506 2024-11-26T10:34:29,084 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting eb3e7aaa7ab0455c85cd25dac8763227, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=73, earliestPutTs=1732617266636 2024-11-26T10:34:29,084 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 0359b2a6214a4ec598d86ab425a95ef7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732617267786 2024-11-26T10:34:29,084 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 2c728f3209a64be897ffef1a7e29bc85, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=113, earliestPutTs=1732617268104 2024-11-26T10:34:29,092 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0dac262a6c43b2828c5201e254d47204#C#compaction#226 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:34:29,092 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/2d9c7f26d6ff4abab1fea3b05e739e88 is 50, key is test_row_0/C:col10/1732617268217/Put/seqid=0 2024-11-26T10:34:29,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742094_1270 (size=12241) 2024-11-26T10:34:29,129 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:29,129 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-26T10:34:29,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:29,130 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2837): Flushing 0dac262a6c43b2828c5201e254d47204 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-26T10:34:29,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=A 2024-11-26T10:34:29,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:29,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=B 2024-11-26T10:34:29,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:29,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=C 2024-11-26T10:34:29,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:29,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/784d5a145ebb48bb9fdf7f25107d2ca7 is 50, key is test_row_1/A:col10/1732617268232/Put/seqid=0 2024-11-26T10:34:29,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742095_1271 (size=9757) 2024-11-26T10:34:29,261 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 
2024-11-26T10:34:29,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 0dac262a6c43b2828c5201e254d47204 2024-11-26T10:34:29,344 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. as already flushing 2024-11-26T10:34:29,359 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:29,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617329356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:29,359 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:29,359 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:29,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617329357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:29,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617329356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:29,359 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:29,359 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:29,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617329357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:29,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60836 deadline: 1732617329357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:29,461 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:29,461 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:29,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617329460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:29,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617329460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:29,462 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:29,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617329460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:29,462 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:29,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60836 deadline: 1732617329460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:29,462 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:29,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617329460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:29,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-26T10:34:29,506 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/2d9c7f26d6ff4abab1fea3b05e739e88 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/2d9c7f26d6ff4abab1fea3b05e739e88 2024-11-26T10:34:29,511 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 0dac262a6c43b2828c5201e254d47204/C of 0dac262a6c43b2828c5201e254d47204 into 2d9c7f26d6ff4abab1fea3b05e739e88(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-26T10:34:29,511 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0dac262a6c43b2828c5201e254d47204: 2024-11-26T10:34:29,511 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204., storeName=0dac262a6c43b2828c5201e254d47204/C, priority=12, startTime=1732617269080; duration=0sec 2024-11-26T10:34:29,511 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:34:29,511 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0dac262a6c43b2828c5201e254d47204:C 2024-11-26T10:34:29,537 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/784d5a145ebb48bb9fdf7f25107d2ca7 2024-11-26T10:34:29,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/1c202d2444584c0ba743cdf3291b459a is 50, key is test_row_1/B:col10/1732617268232/Put/seqid=0 2024-11-26T10:34:29,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742096_1272 (size=9757) 2024-11-26T10:34:29,664 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:29,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617329662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:29,664 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:29,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617329662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:29,664 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:29,664 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:29,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617329662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:29,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60836 deadline: 1732617329663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:29,665 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:29,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617329663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:29,947 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/1c202d2444584c0ba743cdf3291b459a 2024-11-26T10:34:29,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/d06bbcf4acdf4cf99db391839fa33003 is 50, key is test_row_1/C:col10/1732617268232/Put/seqid=0 2024-11-26T10:34:29,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742097_1273 (size=9757) 2024-11-26T10:34:29,965 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:29,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617329965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:29,966 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:29,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617329965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:29,967 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:29,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617329965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:29,968 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:29,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60836 deadline: 1732617329966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:29,968 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:29,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617329967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:30,357 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/d06bbcf4acdf4cf99db391839fa33003 2024-11-26T10:34:30,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/784d5a145ebb48bb9fdf7f25107d2ca7 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/784d5a145ebb48bb9fdf7f25107d2ca7 2024-11-26T10:34:30,364 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/784d5a145ebb48bb9fdf7f25107d2ca7, entries=100, sequenceid=134, filesize=9.5 K 2024-11-26T10:34:30,365 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/1c202d2444584c0ba743cdf3291b459a as 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/1c202d2444584c0ba743cdf3291b459a 2024-11-26T10:34:30,369 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/1c202d2444584c0ba743cdf3291b459a, entries=100, sequenceid=134, filesize=9.5 K 2024-11-26T10:34:30,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/d06bbcf4acdf4cf99db391839fa33003 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/d06bbcf4acdf4cf99db391839fa33003 2024-11-26T10:34:30,373 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/d06bbcf4acdf4cf99db391839fa33003, entries=100, sequenceid=134, filesize=9.5 K 2024-11-26T10:34:30,374 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=107.34 KB/109920 for 0dac262a6c43b2828c5201e254d47204 in 1244ms, sequenceid=134, compaction requested=true 2024-11-26T10:34:30,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2538): Flush status journal for 0dac262a6c43b2828c5201e254d47204: 2024-11-26T10:34:30,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 
2024-11-26T10:34:30,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=78 2024-11-26T10:34:30,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4106): Remote procedure done, pid=78 2024-11-26T10:34:30,376 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=77 2024-11-26T10:34:30,376 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0060 sec 2024-11-26T10:34:30,377 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees in 2.0090 sec 2024-11-26T10:34:30,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 0dac262a6c43b2828c5201e254d47204 2024-11-26T10:34:30,469 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0dac262a6c43b2828c5201e254d47204 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-26T10:34:30,469 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=A 2024-11-26T10:34:30,469 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:30,469 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=B 2024-11-26T10:34:30,469 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:30,469 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=C 2024-11-26T10:34:30,469 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:30,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-26T10:34:30,472 INFO [Thread-1149 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 77 completed 2024-11-26T10:34:30,473 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-26T10:34:30,473 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/df061021645b45d28deabd0d0e297123 is 50, key is test_row_0/A:col10/1732617269356/Put/seqid=0 2024-11-26T10:34:30,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees 2024-11-26T10:34:30,474 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 
2024-11-26T10:34:30,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-26T10:34:30,475 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-26T10:34:30,475 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-26T10:34:30,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742098_1274 (size=12151) 2024-11-26T10:34:30,482 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:30,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60836 deadline: 1732617330480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:30,483 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:30,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617330480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:30,483 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:30,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617330481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:30,484 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:30,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617330481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:30,484 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:30,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617330481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:30,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-26T10:34:30,584 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:30,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60836 deadline: 1732617330583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:30,585 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:30,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617330584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:30,586 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:30,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617330584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:30,586 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:30,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617330584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:30,586 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:30,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617330584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:30,626 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:30,626 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-26T10:34:30,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:30,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. as already flushing 2024-11-26T10:34:30,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:30,627 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:34:30,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:30,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:30,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-26T10:34:30,778 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:30,779 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-26T10:34:30,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:30,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. as already flushing 2024-11-26T10:34:30,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:30,779 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:30,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:30,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:30,788 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:30,788 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:30,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617330786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:30,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60836 deadline: 1732617330785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:30,789 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:30,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617330787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:30,789 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:30,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617330787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:30,790 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:30,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617330788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:30,878 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/df061021645b45d28deabd0d0e297123 2024-11-26T10:34:30,884 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/351c9f4d74274f7daf4565ae20012cc4 is 50, key is test_row_0/B:col10/1732617269356/Put/seqid=0 2024-11-26T10:34:30,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742099_1275 (size=12151) 2024-11-26T10:34:30,931 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:30,931 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-26T10:34:30,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:30,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. as already flushing 2024-11-26T10:34:30,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:30,931 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:30,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:30,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:31,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-26T10:34:31,083 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:31,083 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-26T10:34:31,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:31,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. as already flushing 2024-11-26T10:34:31,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:31,083 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:31,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:31,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:31,091 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:31,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60836 deadline: 1732617331089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:31,091 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:31,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617331089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:31,091 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:31,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617331089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:31,092 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:31,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617331090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:31,094 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:31,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617331091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:31,235 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:31,235 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-26T10:34:31,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:31,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. as already flushing 2024-11-26T10:34:31,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:31,236 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:31,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:31,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:31,289 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/351c9f4d74274f7daf4565ae20012cc4 2024-11-26T10:34:31,296 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/d604a6b4e723415fa64720ed7cab4e0e is 50, key is test_row_0/C:col10/1732617269356/Put/seqid=0 2024-11-26T10:34:31,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742100_1276 (size=12151) 2024-11-26T10:34:31,388 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:31,388 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-26T10:34:31,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:31,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 
as already flushing 2024-11-26T10:34:31,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:31,389 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:31,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:31,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:31,541 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:31,541 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-26T10:34:31,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:31,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. as already flushing 2024-11-26T10:34:31,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:31,542 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:31,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:31,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:31,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-26T10:34:31,593 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:31,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617331592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:31,595 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:31,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60836 deadline: 1732617331594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:31,596 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:31,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617331594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:31,596 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:31,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617331594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:31,596 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:31,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617331595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:31,693 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:31,694 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-26T10:34:31,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:31,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. as already flushing 2024-11-26T10:34:31,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:31,694 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:31,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:31,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:31,701 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/d604a6b4e723415fa64720ed7cab4e0e 2024-11-26T10:34:31,704 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/df061021645b45d28deabd0d0e297123 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/df061021645b45d28deabd0d0e297123 2024-11-26T10:34:31,708 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/df061021645b45d28deabd0d0e297123, entries=150, sequenceid=155, filesize=11.9 K 2024-11-26T10:34:31,708 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/351c9f4d74274f7daf4565ae20012cc4 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/351c9f4d74274f7daf4565ae20012cc4 2024-11-26T10:34:31,712 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/351c9f4d74274f7daf4565ae20012cc4, entries=150, 
sequenceid=155, filesize=11.9 K 2024-11-26T10:34:31,713 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/d604a6b4e723415fa64720ed7cab4e0e as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/d604a6b4e723415fa64720ed7cab4e0e 2024-11-26T10:34:31,716 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/d604a6b4e723415fa64720ed7cab4e0e, entries=150, sequenceid=155, filesize=11.9 K 2024-11-26T10:34:31,717 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=93.93 KB/96180 for 0dac262a6c43b2828c5201e254d47204 in 1248ms, sequenceid=155, compaction requested=true 2024-11-26T10:34:31,717 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0dac262a6c43b2828c5201e254d47204: 2024-11-26T10:34:31,717 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-26T10:34:31,717 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0dac262a6c43b2828c5201e254d47204:A, priority=-2147483648, current under compaction store size is 1 2024-11-26T10:34:31,718 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:34:31,718 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-26T10:34:31,718 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0dac262a6c43b2828c5201e254d47204:B, priority=-2147483648, current under compaction store size is 2 2024-11-26T10:34:31,718 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:34:31,718 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0dac262a6c43b2828c5201e254d47204:C, priority=-2147483648, current under compaction store size is 3 2024-11-26T10:34:31,718 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:34:31,718 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 46116 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-26T10:34:31,719 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): 0dac262a6c43b2828c5201e254d47204/A is initiating minor compaction (all files) 2024-11-26T10:34:31,719 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 
46116 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-26T10:34:31,719 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): 0dac262a6c43b2828c5201e254d47204/B is initiating minor compaction (all files) 2024-11-26T10:34:31,719 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0dac262a6c43b2828c5201e254d47204/A in TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:31,719 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0dac262a6c43b2828c5201e254d47204/B in TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:31,719 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/0f2bbab550ed47efb91b34fc0d24adad, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/6843f35503e249f0810f11ca3ea930cf, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/784d5a145ebb48bb9fdf7f25107d2ca7, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/df061021645b45d28deabd0d0e297123] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp, totalSize=45.0 K 2024-11-26T10:34:31,719 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/2b10ef81bd454e02ac96197d8bbcfb02, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/3eed9cd449e7466b8abe3702e77214ad, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/1c202d2444584c0ba743cdf3291b459a, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/351c9f4d74274f7daf4565ae20012cc4] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp, totalSize=45.0 K 2024-11-26T10:34:31,719 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 2b10ef81bd454e02ac96197d8bbcfb02, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732617267786 2024-11-26T10:34:31,719 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 3eed9cd449e7466b8abe3702e77214ad, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=113, earliestPutTs=1732617268104 2024-11-26T10:34:31,719 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0f2bbab550ed47efb91b34fc0d24adad, keycount=150, bloomtype=ROW, size=11.9 K, 
encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732617267786 2024-11-26T10:34:31,720 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 1c202d2444584c0ba743cdf3291b459a, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1732617268231 2024-11-26T10:34:31,720 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6843f35503e249f0810f11ca3ea930cf, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=113, earliestPutTs=1732617268104 2024-11-26T10:34:31,720 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 351c9f4d74274f7daf4565ae20012cc4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1732617269356 2024-11-26T10:34:31,720 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 784d5a145ebb48bb9fdf7f25107d2ca7, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1732617268231 2024-11-26T10:34:31,720 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting df061021645b45d28deabd0d0e297123, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1732617269356 2024-11-26T10:34:31,727 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0dac262a6c43b2828c5201e254d47204#B#compaction#233 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:34:31,728 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/a7f9e985d5d94ac4ad5b680e3403505e is 50, key is test_row_0/B:col10/1732617269356/Put/seqid=0 2024-11-26T10:34:31,729 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0dac262a6c43b2828c5201e254d47204#A#compaction#234 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:34:31,729 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/9b546183331b491d98509d8f69616090 is 50, key is test_row_0/A:col10/1732617269356/Put/seqid=0 2024-11-26T10:34:31,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742101_1277 (size=12493) 2024-11-26T10:34:31,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742102_1278 (size=12493) 2024-11-26T10:34:31,846 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:31,846 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-26T10:34:31,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:31,846 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2837): Flushing 0dac262a6c43b2828c5201e254d47204 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-26T10:34:31,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=A 2024-11-26T10:34:31,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:31,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=B 2024-11-26T10:34:31,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:31,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=C 2024-11-26T10:34:31,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:31,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/d364621ffe5e42f8a838de0ab5792b3b is 50, key is test_row_0/A:col10/1732617270480/Put/seqid=0 2024-11-26T10:34:31,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742103_1279 
(size=12151) 2024-11-26T10:34:32,140 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/a7f9e985d5d94ac4ad5b680e3403505e as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/a7f9e985d5d94ac4ad5b680e3403505e 2024-11-26T10:34:32,141 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/9b546183331b491d98509d8f69616090 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/9b546183331b491d98509d8f69616090 2024-11-26T10:34:32,145 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 0dac262a6c43b2828c5201e254d47204/B of 0dac262a6c43b2828c5201e254d47204 into a7f9e985d5d94ac4ad5b680e3403505e(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:34:32,145 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0dac262a6c43b2828c5201e254d47204: 2024-11-26T10:34:32,145 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204., storeName=0dac262a6c43b2828c5201e254d47204/B, priority=12, startTime=1732617271718; duration=0sec 2024-11-26T10:34:32,145 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:34:32,145 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0dac262a6c43b2828c5201e254d47204:B 2024-11-26T10:34:32,145 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:34:32,146 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:34:32,146 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): 0dac262a6c43b2828c5201e254d47204/C is initiating minor compaction (all files) 2024-11-26T10:34:32,146 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0dac262a6c43b2828c5201e254d47204/C in TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 
2024-11-26T10:34:32,146 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/2d9c7f26d6ff4abab1fea3b05e739e88, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/d06bbcf4acdf4cf99db391839fa33003, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/d604a6b4e723415fa64720ed7cab4e0e] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp, totalSize=33.3 K 2024-11-26T10:34:32,146 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 2d9c7f26d6ff4abab1fea3b05e739e88, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=113, earliestPutTs=1732617268104 2024-11-26T10:34:32,147 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting d06bbcf4acdf4cf99db391839fa33003, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1732617268231 2024-11-26T10:34:32,147 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting d604a6b4e723415fa64720ed7cab4e0e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1732617269356 2024-11-26T10:34:32,149 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 0dac262a6c43b2828c5201e254d47204/A of 0dac262a6c43b2828c5201e254d47204 into 9b546183331b491d98509d8f69616090(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:34:32,149 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0dac262a6c43b2828c5201e254d47204: 2024-11-26T10:34:32,149 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204., storeName=0dac262a6c43b2828c5201e254d47204/A, priority=12, startTime=1732617271717; duration=0sec 2024-11-26T10:34:32,149 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:34:32,149 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0dac262a6c43b2828c5201e254d47204:A 2024-11-26T10:34:32,155 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0dac262a6c43b2828c5201e254d47204#C#compaction#236 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:34:32,155 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/0fe6dcb9fea04cb2833dfb628ca5d7dd is 50, key is test_row_0/C:col10/1732617269356/Put/seqid=0 2024-11-26T10:34:32,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742104_1280 (size=12493) 2024-11-26T10:34:32,274 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/d364621ffe5e42f8a838de0ab5792b3b 2024-11-26T10:34:32,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/9c7c7297d9a94228b096ce05eac9151a is 50, key is test_row_0/B:col10/1732617270480/Put/seqid=0 2024-11-26T10:34:32,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742105_1281 (size=12151) 2024-11-26T10:34:32,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-26T10:34:32,583 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/0fe6dcb9fea04cb2833dfb628ca5d7dd as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/0fe6dcb9fea04cb2833dfb628ca5d7dd 2024-11-26T10:34:32,588 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0dac262a6c43b2828c5201e254d47204/C of 0dac262a6c43b2828c5201e254d47204 into 0fe6dcb9fea04cb2833dfb628ca5d7dd(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-26T10:34:32,588 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0dac262a6c43b2828c5201e254d47204: 2024-11-26T10:34:32,588 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204., storeName=0dac262a6c43b2828c5201e254d47204/C, priority=13, startTime=1732617271718; duration=0sec 2024-11-26T10:34:32,588 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:34:32,588 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0dac262a6c43b2828c5201e254d47204:C 2024-11-26T10:34:32,597 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. as already flushing 2024-11-26T10:34:32,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 0dac262a6c43b2828c5201e254d47204 2024-11-26T10:34:32,612 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:32,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60836 deadline: 1732617332609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:32,612 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:32,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617332609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:32,615 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:32,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617332612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:32,616 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:32,616 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:32,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617332613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:32,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617332612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:32,684 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/9c7c7297d9a94228b096ce05eac9151a 2024-11-26T10:34:32,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/1417dd1ebcdc4283b3db5f759ca3a9e5 is 50, key is test_row_0/C:col10/1732617270480/Put/seqid=0 2024-11-26T10:34:32,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742106_1282 (size=12151) 2024-11-26T10:34:32,714 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:32,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60836 deadline: 1732617332713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:32,714 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:32,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617332713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:32,717 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:32,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617332716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:32,717 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:32,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617332716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:32,717 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:32,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617332716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:32,918 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:32,918 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:32,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60836 deadline: 1732617332916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:32,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617332916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:32,918 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:32,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617332918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:32,920 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:32,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617332918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:32,920 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:32,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617332919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:33,093 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/1417dd1ebcdc4283b3db5f759ca3a9e5 2024-11-26T10:34:33,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/d364621ffe5e42f8a838de0ab5792b3b as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/d364621ffe5e42f8a838de0ab5792b3b 2024-11-26T10:34:33,099 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/d364621ffe5e42f8a838de0ab5792b3b, entries=150, sequenceid=172, filesize=11.9 K 2024-11-26T10:34:33,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/9c7c7297d9a94228b096ce05eac9151a as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/9c7c7297d9a94228b096ce05eac9151a 2024-11-26T10:34:33,103 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/9c7c7297d9a94228b096ce05eac9151a, entries=150, sequenceid=172, filesize=11.9 K 2024-11-26T10:34:33,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/1417dd1ebcdc4283b3db5f759ca3a9e5 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/1417dd1ebcdc4283b3db5f759ca3a9e5 2024-11-26T10:34:33,107 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/1417dd1ebcdc4283b3db5f759ca3a9e5, entries=150, sequenceid=172, filesize=11.9 K 2024-11-26T10:34:33,108 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=114.05 KB/116790 for 0dac262a6c43b2828c5201e254d47204 in 1262ms, sequenceid=172, compaction requested=false 2024-11-26T10:34:33,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2538): Flush status journal for 0dac262a6c43b2828c5201e254d47204: 2024-11-26T10:34:33,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 
2024-11-26T10:34:33,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-11-26T10:34:33,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4106): Remote procedure done, pid=80 2024-11-26T10:34:33,110 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=80, resume processing ppid=79 2024-11-26T10:34:33,110 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, ppid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6340 sec 2024-11-26T10:34:33,111 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees in 2.6370 sec 2024-11-26T10:34:33,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 0dac262a6c43b2828c5201e254d47204 2024-11-26T10:34:33,221 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0dac262a6c43b2828c5201e254d47204 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-26T10:34:33,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=A 2024-11-26T10:34:33,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:33,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=B 2024-11-26T10:34:33,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:33,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=C 2024-11-26T10:34:33,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:33,226 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/2ca4ce2cfab3496a9e9c2c59e6730df8 is 50, key is test_row_0/A:col10/1732617272611/Put/seqid=0 2024-11-26T10:34:33,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742107_1283 (size=12151) 2024-11-26T10:34:33,232 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:33,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617333228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:33,234 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:33,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617333230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:33,234 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:33,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617333230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:33,234 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:33,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60836 deadline: 1732617333231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:33,234 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:33,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617333231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:33,336 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:33,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617333334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:33,336 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:33,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617333334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:33,336 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:33,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617333335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:33,337 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:33,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60836 deadline: 1732617333335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:33,337 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:33,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617333335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:33,537 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:33,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617333537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:33,538 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:33,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617333537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:33,539 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:33,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617333537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:33,539 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:33,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617333538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:33,540 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:33,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60836 deadline: 1732617333538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:33,631 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/2ca4ce2cfab3496a9e9c2c59e6730df8 2024-11-26T10:34:33,638 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/23a7a7b8832a4051ab73202614ce2d0c is 50, key is test_row_0/B:col10/1732617272611/Put/seqid=0 2024-11-26T10:34:33,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742108_1284 (size=12151) 2024-11-26T10:34:33,841 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:33,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617333839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:33,841 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:33,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617333840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:33,841 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:33,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617333840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:33,842 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:33,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617333841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:33,843 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:33,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60836 deadline: 1732617333841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:34,054 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/23a7a7b8832a4051ab73202614ce2d0c 2024-11-26T10:34:34,061 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/297cc4d25a5a4453900eea3faf8d1f46 is 50, key is test_row_0/C:col10/1732617272611/Put/seqid=0 2024-11-26T10:34:34,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742109_1285 (size=12151) 2024-11-26T10:34:34,344 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-26T10:34:34,345 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:34,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617334344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:34,348 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:34,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617334346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:34,348 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:34,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617334346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:34,348 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:34,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617334346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:34,349 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:34,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60836 deadline: 1732617334348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:34,465 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/297cc4d25a5a4453900eea3faf8d1f46 2024-11-26T10:34:34,468 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/2ca4ce2cfab3496a9e9c2c59e6730df8 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/2ca4ce2cfab3496a9e9c2c59e6730df8 2024-11-26T10:34:34,471 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/2ca4ce2cfab3496a9e9c2c59e6730df8, entries=150, sequenceid=197, filesize=11.9 K 2024-11-26T10:34:34,472 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/23a7a7b8832a4051ab73202614ce2d0c as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/23a7a7b8832a4051ab73202614ce2d0c 2024-11-26T10:34:34,475 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/23a7a7b8832a4051ab73202614ce2d0c, entries=150, sequenceid=197, filesize=11.9 K 2024-11-26T10:34:34,476 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/297cc4d25a5a4453900eea3faf8d1f46 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/297cc4d25a5a4453900eea3faf8d1f46 2024-11-26T10:34:34,479 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/297cc4d25a5a4453900eea3faf8d1f46, entries=150, sequenceid=197, filesize=11.9 K 2024-11-26T10:34:34,480 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for 0dac262a6c43b2828c5201e254d47204 in 1259ms, sequenceid=197, compaction requested=true 2024-11-26T10:34:34,480 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0dac262a6c43b2828c5201e254d47204: 2024-11-26T10:34:34,480 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0dac262a6c43b2828c5201e254d47204:A, priority=-2147483648, current under compaction store size is 1 2024-11-26T10:34:34,480 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:34:34,480 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:34:34,480 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0dac262a6c43b2828c5201e254d47204:B, priority=-2147483648, current under compaction store size is 2 2024-11-26T10:34:34,480 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:34:34,480 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:34:34,480 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0dac262a6c43b2828c5201e254d47204:C, priority=-2147483648, current under compaction store size is 3 2024-11-26T10:34:34,480 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:34:34,481 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:34:34,481 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:34:34,481 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): 0dac262a6c43b2828c5201e254d47204/B is initiating minor compaction (all files) 2024-11-26T10:34:34,481 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): 0dac262a6c43b2828c5201e254d47204/A is initiating minor compaction (all files) 2024-11-26T10:34:34,481 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0dac262a6c43b2828c5201e254d47204/B in TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 
2024-11-26T10:34:34,481 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0dac262a6c43b2828c5201e254d47204/A in TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:34,481 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/a7f9e985d5d94ac4ad5b680e3403505e, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/9c7c7297d9a94228b096ce05eac9151a, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/23a7a7b8832a4051ab73202614ce2d0c] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp, totalSize=35.9 K 2024-11-26T10:34:34,481 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/9b546183331b491d98509d8f69616090, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/d364621ffe5e42f8a838de0ab5792b3b, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/2ca4ce2cfab3496a9e9c2c59e6730df8] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp, totalSize=35.9 K 2024-11-26T10:34:34,482 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting a7f9e985d5d94ac4ad5b680e3403505e, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1732617269356 2024-11-26T10:34:34,482 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9b546183331b491d98509d8f69616090, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1732617269356 2024-11-26T10:34:34,482 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting d364621ffe5e42f8a838de0ab5792b3b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732617270480 2024-11-26T10:34:34,482 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 9c7c7297d9a94228b096ce05eac9151a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732617270480 2024-11-26T10:34:34,482 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2ca4ce2cfab3496a9e9c2c59e6730df8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732617272607 2024-11-26T10:34:34,482 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 23a7a7b8832a4051ab73202614ce2d0c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732617272607 
2024-11-26T10:34:34,488 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0dac262a6c43b2828c5201e254d47204#B#compaction#242 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:34:34,488 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0dac262a6c43b2828c5201e254d47204#A#compaction#243 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:34:34,488 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/bf0b8094912c4c03819a0448ed7f0ea3 is 50, key is test_row_0/A:col10/1732617272611/Put/seqid=0 2024-11-26T10:34:34,488 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/210e6eee7b744721a736b2e39b62580b is 50, key is test_row_0/B:col10/1732617272611/Put/seqid=0 2024-11-26T10:34:34,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742111_1287 (size=12595) 2024-11-26T10:34:34,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742110_1286 (size=12595) 2024-11-26T10:34:34,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-26T10:34:34,580 INFO [Thread-1149 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 79 completed 2024-11-26T10:34:34,581 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-26T10:34:34,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees 2024-11-26T10:34:34,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-26T10:34:34,582 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-26T10:34:34,582 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-26T10:34:34,582 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-26T10:34:34,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-26T10:34:34,733 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:34,734 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-26T10:34:34,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:34,734 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2837): Flushing 0dac262a6c43b2828c5201e254d47204 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-26T10:34:34,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=A 2024-11-26T10:34:34,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:34,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=B 2024-11-26T10:34:34,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:34,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=C 2024-11-26T10:34:34,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:34,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/9a03bdfc134e410180076bc2ba4c62e1 is 50, key is test_row_0/A:col10/1732617273225/Put/seqid=0 2024-11-26T10:34:34,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742112_1288 (size=12151) 2024-11-26T10:34:34,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-26T10:34:34,896 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/210e6eee7b744721a736b2e39b62580b as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/210e6eee7b744721a736b2e39b62580b 2024-11-26T10:34:34,900 INFO 
[RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0dac262a6c43b2828c5201e254d47204/B of 0dac262a6c43b2828c5201e254d47204 into 210e6eee7b744721a736b2e39b62580b(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:34:34,900 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0dac262a6c43b2828c5201e254d47204: 2024-11-26T10:34:34,900 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204., storeName=0dac262a6c43b2828c5201e254d47204/B, priority=13, startTime=1732617274480; duration=0sec 2024-11-26T10:34:34,901 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:34:34,901 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0dac262a6c43b2828c5201e254d47204:B 2024-11-26T10:34:34,901 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:34:34,901 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:34:34,902 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): 0dac262a6c43b2828c5201e254d47204/C is initiating minor compaction (all files) 2024-11-26T10:34:34,902 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0dac262a6c43b2828c5201e254d47204/C in TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 
2024-11-26T10:34:34,902 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/0fe6dcb9fea04cb2833dfb628ca5d7dd, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/1417dd1ebcdc4283b3db5f759ca3a9e5, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/297cc4d25a5a4453900eea3faf8d1f46] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp, totalSize=35.9 K 2024-11-26T10:34:34,902 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 0fe6dcb9fea04cb2833dfb628ca5d7dd, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1732617269356 2024-11-26T10:34:34,902 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 1417dd1ebcdc4283b3db5f759ca3a9e5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732617270480 2024-11-26T10:34:34,903 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 297cc4d25a5a4453900eea3faf8d1f46, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732617272607 2024-11-26T10:34:34,906 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/bf0b8094912c4c03819a0448ed7f0ea3 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/bf0b8094912c4c03819a0448ed7f0ea3 2024-11-26T10:34:34,909 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0dac262a6c43b2828c5201e254d47204#C#compaction#245 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:34:34,909 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/c28abcd0c6de476f9f64f5bd7e15c21a is 50, key is test_row_0/C:col10/1732617272611/Put/seqid=0 2024-11-26T10:34:34,910 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0dac262a6c43b2828c5201e254d47204/A of 0dac262a6c43b2828c5201e254d47204 into bf0b8094912c4c03819a0448ed7f0ea3(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-26T10:34:34,910 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0dac262a6c43b2828c5201e254d47204: 2024-11-26T10:34:34,910 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204., storeName=0dac262a6c43b2828c5201e254d47204/A, priority=13, startTime=1732617274480; duration=0sec 2024-11-26T10:34:34,910 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:34:34,910 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0dac262a6c43b2828c5201e254d47204:A 2024-11-26T10:34:34,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742113_1289 (size=12595) 2024-11-26T10:34:35,141 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/9a03bdfc134e410180076bc2ba4c62e1 2024-11-26T10:34:35,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/867c2f869c6740fca7eb55b227e82ed6 is 50, key is test_row_0/B:col10/1732617273225/Put/seqid=0 2024-11-26T10:34:35,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742114_1290 (size=12151) 2024-11-26T10:34:35,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-26T10:34:35,317 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/c28abcd0c6de476f9f64f5bd7e15c21a as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/c28abcd0c6de476f9f64f5bd7e15c21a 2024-11-26T10:34:35,320 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0dac262a6c43b2828c5201e254d47204/C of 0dac262a6c43b2828c5201e254d47204 into c28abcd0c6de476f9f64f5bd7e15c21a(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-26T10:34:35,320 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0dac262a6c43b2828c5201e254d47204: 2024-11-26T10:34:35,320 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204., storeName=0dac262a6c43b2828c5201e254d47204/C, priority=13, startTime=1732617274480; duration=0sec 2024-11-26T10:34:35,320 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:34:35,320 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0dac262a6c43b2828c5201e254d47204:C 2024-11-26T10:34:35,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 0dac262a6c43b2828c5201e254d47204 2024-11-26T10:34:35,352 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. as already flushing 2024-11-26T10:34:35,365 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:35,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617335362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:35,365 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:35,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617335362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:35,366 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:35,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617335364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:35,367 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:35,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60836 deadline: 1732617335365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:35,368 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:35,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617335365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:35,467 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:35,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617335465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:35,467 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:35,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617335466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:35,467 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:35,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617335467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:35,469 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:35,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60836 deadline: 1732617335468, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:35,469 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:35,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617335468, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:35,550 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/867c2f869c6740fca7eb55b227e82ed6 2024-11-26T10:34:35,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/efe3b3dc5a4e4c8591b1675c90db6a49 is 50, key is test_row_0/C:col10/1732617273225/Put/seqid=0 2024-11-26T10:34:35,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742115_1291 (size=12151) 2024-11-26T10:34:35,669 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:35,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617335668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:35,670 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:35,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617335669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:35,671 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:35,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617335669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:35,671 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:35,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60836 deadline: 1732617335670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:35,672 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:35,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617335671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:35,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-26T10:34:35,960 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/efe3b3dc5a4e4c8591b1675c90db6a49 2024-11-26T10:34:35,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/9a03bdfc134e410180076bc2ba4c62e1 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/9a03bdfc134e410180076bc2ba4c62e1 2024-11-26T10:34:35,969 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/9a03bdfc134e410180076bc2ba4c62e1, entries=150, sequenceid=212, filesize=11.9 K 2024-11-26T10:34:35,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/867c2f869c6740fca7eb55b227e82ed6 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/867c2f869c6740fca7eb55b227e82ed6 2024-11-26T10:34:35,973 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/867c2f869c6740fca7eb55b227e82ed6, entries=150, sequenceid=212, filesize=11.9 K 2024-11-26T10:34:35,973 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:35,973 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:35,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60836 deadline: 1732617335972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:35,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617335972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:35,973 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:35,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617335972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:35,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/efe3b3dc5a4e4c8591b1675c90db6a49 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/efe3b3dc5a4e4c8591b1675c90db6a49 2024-11-26T10:34:35,976 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:35,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617335973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:35,976 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:35,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617335975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:35,978 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/efe3b3dc5a4e4c8591b1675c90db6a49, entries=150, sequenceid=212, filesize=11.9 K 2024-11-26T10:34:35,978 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 0dac262a6c43b2828c5201e254d47204 in 1244ms, sequenceid=212, compaction requested=false 2024-11-26T10:34:35,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2538): Flush status journal for 0dac262a6c43b2828c5201e254d47204: 2024-11-26T10:34:35,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 
2024-11-26T10:34:35,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=82 2024-11-26T10:34:35,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4106): Remote procedure done, pid=82 2024-11-26T10:34:35,980 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=82, resume processing ppid=81 2024-11-26T10:34:35,980 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3970 sec 2024-11-26T10:34:35,981 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees in 1.4000 sec 2024-11-26T10:34:36,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 0dac262a6c43b2828c5201e254d47204 2024-11-26T10:34:36,476 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0dac262a6c43b2828c5201e254d47204 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-26T10:34:36,476 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=A 2024-11-26T10:34:36,476 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:36,476 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=B 2024-11-26T10:34:36,476 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:36,476 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=C 2024-11-26T10:34:36,476 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:36,480 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/21c8e8bab13a445cb542338f15677397 is 50, key is test_row_0/A:col10/1732617275364/Put/seqid=0 2024-11-26T10:34:36,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742116_1292 (size=12151) 2024-11-26T10:34:36,485 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:36,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617336483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:36,487 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:36,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60836 deadline: 1732617336484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:36,487 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:36,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617336484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:36,487 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:36,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617336484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:36,487 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:36,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617336485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:36,587 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:36,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617336586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:36,589 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:36,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60836 deadline: 1732617336588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:36,589 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:36,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617336588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:36,589 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:36,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617336588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:36,590 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:36,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617336588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:36,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-26T10:34:36,685 INFO [Thread-1149 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 81 completed 2024-11-26T10:34:36,686 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-26T10:34:36,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees 2024-11-26T10:34:36,687 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-26T10:34:36,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-26T10:34:36,688 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-26T10:34:36,688 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-26T10:34:36,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=83 2024-11-26T10:34:36,791 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:36,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617336789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:36,792 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:36,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617336790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:36,792 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:36,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60836 deadline: 1732617336791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:36,793 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:36,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617336791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:36,793 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:36,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617336791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:36,838 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:36,838 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-26T10:34:36,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:36,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. as already flushing 2024-11-26T10:34:36,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:36,839 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
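The records above show the master's FlushTableProcedure (pid=83) dispatching a FlushRegionProcedure (pid=84) to the region server, which answers "NOT flushing ... as already flushing" and reports an IOException back; the master keeps re-dispatching pid=84 while the earlier flush is still in progress. The flush itself was requested through the admin API (the "Operation: FLUSH, Table Name: default:TestAcidGuarantees" line). A minimal client-side sketch of such a request is below; it assumes an already-reachable cluster and uses the table name from the log, and is illustrative rather than the test's actual code.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Asks the master to run a flush procedure over every region of the table,
      // analogous to the "flush TestAcidGuarantees" request seen in the log.
      // The call returns once the master reports the procedure as done.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```

If a region is already flushing when the sub-procedure reaches it, the region server declines exactly as logged here, and the master simply retries the remote call later.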
2024-11-26T10:34:36,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:36,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:36,884 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/21c8e8bab13a445cb542338f15677397 2024-11-26T10:34:36,914 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/e40a9bc83423451da77a8abe3f17b49b is 50, key is test_row_0/B:col10/1732617275364/Put/seqid=0 2024-11-26T10:34:36,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742117_1293 (size=12151) 2024-11-26T10:34:36,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-26T10:34:36,990 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:36,990 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-26T10:34:36,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:36,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. as already flushing 2024-11-26T10:34:36,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:36,991 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:36,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:36,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:37,094 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:37,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60836 deadline: 1732617337093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:37,095 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:37,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617337093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:37,096 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:37,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617337094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:37,096 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:37,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617337094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:37,097 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:37,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617337095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:37,142 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:37,143 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-26T10:34:37,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:37,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. as already flushing 2024-11-26T10:34:37,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:37,143 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
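Each of the rejected Mutate calls above fails inside HRegion.checkResources with RegionTooBusyException because the region's memstore is over its 512.0 K blocking limit; the server logs the WARN and returns the exception so the writer can back off until a flush drains the memstore. The stock HBase client normally retries this exception with its own pauses, so the sketch below only makes the pattern explicit; it assumes client retries are configured low enough (hbase.client.retries.number) that the exception surfaces to the caller, and the row, family, and retry bounds are illustrative, not taken from the test.

```java
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class BackoffPutSketch {
  /** Retries a put with exponential backoff while the region reports itself too busy. */
  static void putWithBackoff(Table table, Put put) throws Exception {
    long backoffMs = 100;                  // illustrative starting backoff
    for (int attempt = 0; attempt < 10; attempt++) {
      try {
        table.put(put);
        return;                            // write accepted
      } catch (RegionTooBusyException e) {
        // Memstore above its blocking limit; wait for the flush to catch up.
        Thread.sleep(backoffMs);
        backoffMs = Math.min(backoffMs * 2, 5_000);
      }
    }
    throw new java.io.IOException("region stayed too busy after retries");
  }

  /** Builds a cell of the same shape the test writes: row test_row_0, family A, qualifier col10. */
  static Put examplePut() {
    return new Put(Bytes.toBytes("test_row_0"))
        .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
  }
}
```

Usage would be along the lines of putWithBackoff(table, examplePut()) against a Table handle for TestAcidGuarantees.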
2024-11-26T10:34:37,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:37,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:37,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-26T10:34:37,295 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:37,295 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-26T10:34:37,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:37,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. as already flushing 2024-11-26T10:34:37,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:37,295 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:37,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:37,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:34:37,318 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/e40a9bc83423451da77a8abe3f17b49b 2024-11-26T10:34:37,324 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/0f1f82603b2a45c4bee99194a752e836 is 50, key is test_row_0/C:col10/1732617275364/Put/seqid=0 2024-11-26T10:34:37,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742118_1294 (size=12151) 2024-11-26T10:34:37,447 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:37,447 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-26T10:34:37,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:37,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. as already flushing 2024-11-26T10:34:37,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:37,448 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
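The "Over memstore limit=512.0 K" figure is the region's blocking threshold: the configured memstore flush size multiplied by the block multiplier (hbase.hregion.memstore.flush.size x hbase.hregion.memstore.block.multiplier). Writes are rejected with RegionTooBusyException until flushes such as the 44.73 KB store flushes of A and B recorded above bring the memstore back under that threshold. The sketch below shows the relationship; the 128 KB flush size and multiplier of 4 are assumptions chosen only because they reproduce the 512 K limit seen in the log, not values read from the test configuration.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Flush a store once its memstore reaches this many bytes (128 KB here, assumed).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    // Reject writes once the region memstore exceeds flush.size * multiplier.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    // 128 KB * 4 = 512 KB, matching the "Over memstore limit=512.0 K" messages above.
    System.out.println("blocking limit bytes = " + blockingLimit);
  }
}
```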
2024-11-26T10:34:37,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:37,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:37,599 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:37,600 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:37,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617337598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:37,600 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-26T10:34:37,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:37,600 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:37,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617337598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:37,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. as already flushing 2024-11-26T10:34:37,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:37,600 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:37,600 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:37,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:37,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617337599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:37,600 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:37,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60836 deadline: 1732617337599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:37,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:34:37,603 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:37,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617337602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:37,727 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/0f1f82603b2a45c4bee99194a752e836 2024-11-26T10:34:37,731 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/21c8e8bab13a445cb542338f15677397 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/21c8e8bab13a445cb542338f15677397 2024-11-26T10:34:37,734 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/21c8e8bab13a445cb542338f15677397, entries=150, sequenceid=238, filesize=11.9 K 2024-11-26T10:34:37,735 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/e40a9bc83423451da77a8abe3f17b49b as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/e40a9bc83423451da77a8abe3f17b49b 2024-11-26T10:34:37,738 
INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/e40a9bc83423451da77a8abe3f17b49b, entries=150, sequenceid=238, filesize=11.9 K 2024-11-26T10:34:37,739 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/0f1f82603b2a45c4bee99194a752e836 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/0f1f82603b2a45c4bee99194a752e836 2024-11-26T10:34:37,743 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/0f1f82603b2a45c4bee99194a752e836, entries=150, sequenceid=238, filesize=11.9 K 2024-11-26T10:34:37,743 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 0dac262a6c43b2828c5201e254d47204 in 1267ms, sequenceid=238, compaction requested=true 2024-11-26T10:34:37,743 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0dac262a6c43b2828c5201e254d47204: 2024-11-26T10:34:37,744 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0dac262a6c43b2828c5201e254d47204:A, priority=-2147483648, current under compaction store size is 1 2024-11-26T10:34:37,744 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:34:37,744 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0dac262a6c43b2828c5201e254d47204:B, priority=-2147483648, current under compaction store size is 2 2024-11-26T10:34:37,744 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:34:37,744 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:34:37,744 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0dac262a6c43b2828c5201e254d47204:C, priority=-2147483648, current under compaction store size is 3 2024-11-26T10:34:37,744 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:34:37,744 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:34:37,745 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:34:37,745 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] 
compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:34:37,745 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): 0dac262a6c43b2828c5201e254d47204/B is initiating minor compaction (all files) 2024-11-26T10:34:37,745 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): 0dac262a6c43b2828c5201e254d47204/A is initiating minor compaction (all files) 2024-11-26T10:34:37,745 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0dac262a6c43b2828c5201e254d47204/A in TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:37,745 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0dac262a6c43b2828c5201e254d47204/B in TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:37,745 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/210e6eee7b744721a736b2e39b62580b, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/867c2f869c6740fca7eb55b227e82ed6, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/e40a9bc83423451da77a8abe3f17b49b] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp, totalSize=36.0 K 2024-11-26T10:34:37,745 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/bf0b8094912c4c03819a0448ed7f0ea3, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/9a03bdfc134e410180076bc2ba4c62e1, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/21c8e8bab13a445cb542338f15677397] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp, totalSize=36.0 K 2024-11-26T10:34:37,745 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting bf0b8094912c4c03819a0448ed7f0ea3, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732617272607 2024-11-26T10:34:37,745 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 210e6eee7b744721a736b2e39b62580b, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732617272607 2024-11-26T10:34:37,745 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 867c2f869c6740fca7eb55b227e82ed6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1732617273225 
2024-11-26T10:34:37,745 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9a03bdfc134e410180076bc2ba4c62e1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1732617273225
2024-11-26T10:34:37,746 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting e40a9bc83423451da77a8abe3f17b49b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1732617275361
2024-11-26T10:34:37,746 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 21c8e8bab13a445cb542338f15677397, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1732617275361
2024-11-26T10:34:37,752 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0dac262a6c43b2828c5201e254d47204#A#compaction#251 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-26T10:34:37,752 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877
2024-11-26T10:34:37,752 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84
2024-11-26T10:34:37,752 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/b7943bc23b85470dafee7906d4e80feb is 50, key is test_row_0/A:col10/1732617275364/Put/seqid=0
2024-11-26T10:34:37,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.
2024-11-26T10:34:37,752 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2837): Flushing 0dac262a6c43b2828c5201e254d47204 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB
2024-11-26T10:34:37,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=A
2024-11-26T10:34:37,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-26T10:34:37,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=B
2024-11-26T10:34:37,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-26T10:34:37,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=C
2024-11-26T10:34:37,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-26T10:34:37,754 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0dac262a6c43b2828c5201e254d47204#B#compaction#252 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-26T10:34:37,755 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/74436f3864304282bcf969f804c6c316 is 50, key is test_row_0/B:col10/1732617275364/Put/seqid=0
2024-11-26T10:34:37,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/5050003094d84a949d7620689440c60b is 50, key is test_row_0/A:col10/1732617276484/Put/seqid=0
2024-11-26T10:34:37,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742120_1296 (size=12697)
2024-11-26T10:34:37,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742119_1295 (size=12697)
2024-11-26T10:34:37,768 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/b7943bc23b85470dafee7906d4e80feb as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/b7943bc23b85470dafee7906d4e80feb
2024-11-26T10:34:37,772 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0dac262a6c43b2828c5201e254d47204/A of 0dac262a6c43b2828c5201e254d47204 into b7943bc23b85470dafee7906d4e80feb(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-26T10:34:37,772 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0dac262a6c43b2828c5201e254d47204:
2024-11-26T10:34:37,772 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204., storeName=0dac262a6c43b2828c5201e254d47204/A, priority=13, startTime=1732617277744; duration=0sec
2024-11-26T10:34:37,772 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-26T10:34:37,772 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0dac262a6c43b2828c5201e254d47204:A
2024-11-26T10:34:37,772 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-26T10:34:37,773 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-26T10:34:37,773 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): 0dac262a6c43b2828c5201e254d47204/C is initiating minor compaction (all files)
2024-11-26T10:34:37,773 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0dac262a6c43b2828c5201e254d47204/C in TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.
2024-11-26T10:34:37,773 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/c28abcd0c6de476f9f64f5bd7e15c21a, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/efe3b3dc5a4e4c8591b1675c90db6a49, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/0f1f82603b2a45c4bee99194a752e836] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp, totalSize=36.0 K
2024-11-26T10:34:37,773 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting c28abcd0c6de476f9f64f5bd7e15c21a, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732617272607
2024-11-26T10:34:37,774 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting efe3b3dc5a4e4c8591b1675c90db6a49, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1732617273225
2024-11-26T10:34:37,774 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0f1f82603b2a45c4bee99194a752e836, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1732617275361
2024-11-26T10:34:37,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742121_1297 (size=12151)
2024-11-26T10:34:37,777 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/5050003094d84a949d7620689440c60b
2024-11-26T10:34:37,782 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0dac262a6c43b2828c5201e254d47204#C#compaction#254 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-26T10:34:37,782 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/ea788f6a0ce04615b5e38276e2a85b20 is 50, key is test_row_0/C:col10/1732617275364/Put/seqid=0
2024-11-26T10:34:37,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83
2024-11-26T10:34:37,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/327e96d40a5e45f3a382e3f39f010374 is 50, key is test_row_0/B:col10/1732617276484/Put/seqid=0
2024-11-26T10:34:37,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742122_1298 (size=12697)
2024-11-26T10:34:37,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742123_1299 (size=12151)
2024-11-26T10:34:37,806 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/ea788f6a0ce04615b5e38276e2a85b20 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/ea788f6a0ce04615b5e38276e2a85b20
2024-11-26T10:34:37,811 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0dac262a6c43b2828c5201e254d47204/C of 0dac262a6c43b2828c5201e254d47204 into ea788f6a0ce04615b5e38276e2a85b20(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-26T10:34:37,811 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0dac262a6c43b2828c5201e254d47204: 2024-11-26T10:34:37,811 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204., storeName=0dac262a6c43b2828c5201e254d47204/C, priority=13, startTime=1732617277744; duration=0sec 2024-11-26T10:34:37,811 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:34:37,811 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0dac262a6c43b2828c5201e254d47204:C 2024-11-26T10:34:38,167 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/74436f3864304282bcf969f804c6c316 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/74436f3864304282bcf969f804c6c316 2024-11-26T10:34:38,171 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0dac262a6c43b2828c5201e254d47204/B of 0dac262a6c43b2828c5201e254d47204 into 74436f3864304282bcf969f804c6c316(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:34:38,171 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0dac262a6c43b2828c5201e254d47204: 2024-11-26T10:34:38,171 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204., storeName=0dac262a6c43b2828c5201e254d47204/B, priority=13, startTime=1732617277744; duration=0sec 2024-11-26T10:34:38,171 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:34:38,171 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0dac262a6c43b2828c5201e254d47204:B 2024-11-26T10:34:38,206 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/327e96d40a5e45f3a382e3f39f010374 2024-11-26T10:34:38,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/0401d07b2f0e4e37913aa52104fe6f27 is 50, key is test_row_0/C:col10/1732617276484/Put/seqid=0 2024-11-26T10:34:38,221 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742124_1300 (size=12151) 2024-11-26T10:34:38,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 0dac262a6c43b2828c5201e254d47204 2024-11-26T10:34:38,603 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. as already flushing 2024-11-26T10:34:38,616 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/0401d07b2f0e4e37913aa52104fe6f27 2024-11-26T10:34:38,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/5050003094d84a949d7620689440c60b as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/5050003094d84a949d7620689440c60b 2024-11-26T10:34:38,621 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:38,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617338617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:38,623 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/5050003094d84a949d7620689440c60b, entries=150, sequenceid=251, filesize=11.9 K 2024-11-26T10:34:38,624 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:38,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60836 deadline: 1732617338620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:38,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/327e96d40a5e45f3a382e3f39f010374 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/327e96d40a5e45f3a382e3f39f010374 2024-11-26T10:34:38,624 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:38,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617338621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:38,624 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:38,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617338621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:38,625 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:38,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617338621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:38,627 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/327e96d40a5e45f3a382e3f39f010374, entries=150, sequenceid=251, filesize=11.9 K 2024-11-26T10:34:38,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/0401d07b2f0e4e37913aa52104fe6f27 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/0401d07b2f0e4e37913aa52104fe6f27 2024-11-26T10:34:38,632 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/0401d07b2f0e4e37913aa52104fe6f27, entries=150, sequenceid=251, filesize=11.9 K 2024-11-26T10:34:38,632 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 0dac262a6c43b2828c5201e254d47204 in 880ms, sequenceid=251, compaction requested=false 2024-11-26T10:34:38,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2538): Flush status journal for 0dac262a6c43b2828c5201e254d47204: 2024-11-26T10:34:38,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 
2024-11-26T10:34:38,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=84 2024-11-26T10:34:38,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4106): Remote procedure done, pid=84 2024-11-26T10:34:38,635 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=83 2024-11-26T10:34:38,635 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9460 sec 2024-11-26T10:34:38,636 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees in 1.9490 sec 2024-11-26T10:34:38,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 0dac262a6c43b2828c5201e254d47204 2024-11-26T10:34:38,724 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0dac262a6c43b2828c5201e254d47204 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-26T10:34:38,724 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=A 2024-11-26T10:34:38,724 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:38,724 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=B 2024-11-26T10:34:38,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:38,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=C 2024-11-26T10:34:38,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:38,728 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/f405c3a6721f45ab9e923a79cddf0e57 is 50, key is test_row_0/A:col10/1732617278620/Put/seqid=0 2024-11-26T10:34:38,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742125_1301 (size=12301) 2024-11-26T10:34:38,734 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:38,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617338731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:38,734 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:38,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60836 deadline: 1732617338731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:38,734 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:38,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617338731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:38,735 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:38,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617338732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:38,735 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:38,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617338732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:38,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-26T10:34:38,791 INFO [Thread-1149 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 83 completed 2024-11-26T10:34:38,791 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-26T10:34:38,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees 2024-11-26T10:34:38,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-26T10:34:38,793 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-26T10:34:38,793 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-26T10:34:38,793 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=86, ppid=85, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-26T10:34:38,835 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to 
exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:38,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617338835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:38,837 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:38,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60836 deadline: 1732617338835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:38,838 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:38,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617338835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:38,838 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:38,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617338835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:38,838 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:38,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617338836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:38,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-26T10:34:38,944 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:38,944 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-26T10:34:38,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:38,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. as already flushing 2024-11-26T10:34:38,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:38,945 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
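Editor's note: the repeated RegionTooBusyException warnings above are memstore backpressure: writes to region 0dac262a6c43b2828c5201e254d47204 are rejected while its memstore sits above the 512.0 K blocking limit, until the in-flight flush drains it. The standard HBase client already treats this as a retriable exception, so the sketch below is purely an illustration of the pattern, not the test's actual writer code; the table, row and column names mirror the log, while the backoff values are made up.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;                        // made-up starting backoff
      for (int attempt = 0; attempt < 10; attempt++) {
        try {
          table.put(put);                          // rejected while the memstore is over its blocking limit
          return;
        } catch (RegionTooBusyException e) {
          Thread.sleep(backoffMs);                 // give the flush time to drain the memstore
          backoffMs = Math.min(backoffMs * 2, 5000);
        }
      }
      throw new java.io.IOException("region stayed too busy after 10 attempts");
    }
  }
}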
2024-11-26T10:34:38,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:38,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:39,039 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:39,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617339036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:39,040 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:39,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60836 deadline: 1732617339038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:39,040 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:39,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617339038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:39,042 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:39,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617339040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:39,042 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:39,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617339040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:39,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-26T10:34:39,096 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:39,097 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-26T10:34:39,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:39,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. as already flushing 2024-11-26T10:34:39,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:39,097 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
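Editor's note: pid=86 keeps being re-dispatched ("Executing remote procedure class ... FlushRegionCallable, pid=86") because the region reports it is already flushing, the callable fails with "Unable to complete flush", and the master retries until the MemStoreFlusher flush in progress finishes. The following is a simplified sketch of that reject-and-retry shape under that reading of the log; it is not HBase's actual FlushRegionCallable or ProcedureExecutor code, and the delay is invented.

import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;

public class FlushRetrySketch {
  private final AtomicBoolean alreadyFlushing = new AtomicBoolean(true);

  // Server side: refuse a procedure-driven flush while one is already in flight,
  // mirroring "NOT flushing ... as already flushing" followed by the IOException above.
  void flushRegionOnce() throws IOException {
    if (alreadyFlushing.get()) {
      throw new IOException("Unable to complete flush");
    }
    // ... write the memstore out to new store files ...
  }

  // Coordinator side: keep re-dispatching the callable until it succeeds, as the master's
  // dispatcher does for pid=86 above (the real dispatcher has its own backoff policy).
  void runProcedureWithRetry() throws InterruptedException {
    while (true) {
      try {
        flushRegionOnce();
        return;             // report SUCCESS back to the master
      } catch (IOException e) {
        Thread.sleep(150);  // made-up retry delay
      }
    }
  }

  public static void main(String[] args) throws InterruptedException {
    FlushRetrySketch sketch = new FlushRetrySketch();
    // Simulate the in-flight MemStoreFlusher flush completing after a moment.
    new Thread(() -> {
      try { Thread.sleep(500); } catch (InterruptedException ignored) { }
      sketch.alreadyFlushing.set(false);
    }).start();
    sketch.runProcedureWithRetry();
  }
}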
2024-11-26T10:34:39,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:39,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:39,132 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/f405c3a6721f45ab9e923a79cddf0e57 2024-11-26T10:34:39,138 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/7927b98a844d4db3b42125154f1156c8 is 50, key is test_row_0/B:col10/1732617278620/Put/seqid=0 2024-11-26T10:34:39,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742126_1302 (size=12301) 2024-11-26T10:34:39,249 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:39,249 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-26T10:34:39,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:39,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. as already flushing 2024-11-26T10:34:39,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:39,249 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
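Editor's note: the "Over memstore limit=512.0 K" figure is the per-region blocking limit, normally hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The snippet below only illustrates that arithmetic with assumed values (128 K flush size times the default multiplier of 4); the settings actually used by TestAcidGuarantees are not visible in this log and may differ.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreLimitConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed values for illustration only.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("memstore blocking limit = " + blockingLimit + " bytes"); // 524288 = 512 K
  }
}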
2024-11-26T10:34:39,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:39,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:39,343 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:39,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617339342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:39,343 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:39,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60836 deadline: 1732617339342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:39,344 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:39,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617339343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:39,344 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:39,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617339343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:39,346 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:39,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617339345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:39,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-26T10:34:39,401 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:39,401 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-26T10:34:39,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:39,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. as already flushing 2024-11-26T10:34:39,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:39,401 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
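Editor's note: the client side of the "Checking to see if procedure is done pid=85" lines (and of the earlier "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 83 completed") is an Admin.flush call, which submits the flush-table procedure on the master and polls until it finishes. A minimal example of issuing that same flush from the client API, with the table name taken from the log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Submits the flush procedure on the master and blocks until it completes;
      // the master answers the client's polling with the "procedure is done" checks above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}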
2024-11-26T10:34:39,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:39,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:39,542 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/7927b98a844d4db3b42125154f1156c8 2024-11-26T10:34:39,549 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/790431b298b1400aaed2913700b63559 is 50, key is test_row_0/C:col10/1732617278620/Put/seqid=0 2024-11-26T10:34:39,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742127_1303 (size=12301) 2024-11-26T10:34:39,553 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:39,553 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-26T10:34:39,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:39,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. as already flushing 2024-11-26T10:34:39,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:39,554 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:34:39,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:39,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:39,705 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:39,705 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-26T10:34:39,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:39,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. as already flushing 2024-11-26T10:34:39,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:39,706 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:39,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:39,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:39,847 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:39,847 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:39,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617339844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:39,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617339844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:39,847 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:39,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617339846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:39,849 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:39,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617339847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:39,849 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:39,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60836 deadline: 1732617339848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:39,857 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:39,858 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-26T10:34:39,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:39,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. as already flushing 2024-11-26T10:34:39,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:39,858 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:39,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:39,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:39,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-26T10:34:39,953 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/790431b298b1400aaed2913700b63559 2024-11-26T10:34:39,956 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/f405c3a6721f45ab9e923a79cddf0e57 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/f405c3a6721f45ab9e923a79cddf0e57 2024-11-26T10:34:39,959 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/f405c3a6721f45ab9e923a79cddf0e57, entries=150, sequenceid=278, filesize=12.0 K 2024-11-26T10:34:39,960 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/7927b98a844d4db3b42125154f1156c8 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/7927b98a844d4db3b42125154f1156c8 2024-11-26T10:34:39,964 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/7927b98a844d4db3b42125154f1156c8, entries=150, sequenceid=278, filesize=12.0 K 2024-11-26T10:34:39,964 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/790431b298b1400aaed2913700b63559 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/790431b298b1400aaed2913700b63559 2024-11-26T10:34:39,967 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/790431b298b1400aaed2913700b63559, entries=150, sequenceid=278, filesize=12.0 K 2024-11-26T10:34:39,968 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 0dac262a6c43b2828c5201e254d47204 in 1244ms, sequenceid=278, compaction requested=true 2024-11-26T10:34:39,968 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0dac262a6c43b2828c5201e254d47204: 2024-11-26T10:34:39,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0dac262a6c43b2828c5201e254d47204:A, priority=-2147483648, current under compaction store size is 1 2024-11-26T10:34:39,968 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:34:39,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:34:39,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0dac262a6c43b2828c5201e254d47204:B, priority=-2147483648, current under compaction store size is 2 2024-11-26T10:34:39,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:34:39,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0dac262a6c43b2828c5201e254d47204:C, priority=-2147483648, current under compaction store size is 3 2024-11-26T10:34:39,968 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:34:39,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:34:39,969 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:34:39,969 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): 0dac262a6c43b2828c5201e254d47204/A is initiating minor 
compaction (all files) 2024-11-26T10:34:39,969 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0dac262a6c43b2828c5201e254d47204/A in TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:39,969 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/b7943bc23b85470dafee7906d4e80feb, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/5050003094d84a949d7620689440c60b, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/f405c3a6721f45ab9e923a79cddf0e57] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp, totalSize=36.3 K 2024-11-26T10:34:39,969 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:34:39,970 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): 0dac262a6c43b2828c5201e254d47204/B is initiating minor compaction (all files) 2024-11-26T10:34:39,970 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0dac262a6c43b2828c5201e254d47204/B in TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 
2024-11-26T10:34:39,970 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/74436f3864304282bcf969f804c6c316, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/327e96d40a5e45f3a382e3f39f010374, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/7927b98a844d4db3b42125154f1156c8] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp, totalSize=36.3 K 2024-11-26T10:34:39,970 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting b7943bc23b85470dafee7906d4e80feb, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1732617275361 2024-11-26T10:34:39,970 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 74436f3864304282bcf969f804c6c316, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1732617275361 2024-11-26T10:34:39,970 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 327e96d40a5e45f3a382e3f39f010374, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1732617276480 2024-11-26T10:34:39,970 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5050003094d84a949d7620689440c60b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1732617276480 2024-11-26T10:34:39,970 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting f405c3a6721f45ab9e923a79cddf0e57, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1732617278620 2024-11-26T10:34:39,970 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 7927b98a844d4db3b42125154f1156c8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1732617278620 2024-11-26T10:34:39,976 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0dac262a6c43b2828c5201e254d47204#B#compaction#260 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:34:39,976 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/a245fa5796824432bcce7bdafc857c3a is 50, key is test_row_0/B:col10/1732617278620/Put/seqid=0 2024-11-26T10:34:39,976 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0dac262a6c43b2828c5201e254d47204#A#compaction#261 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:34:39,976 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/dc7a67674b254e74bfd501832067277e is 50, key is test_row_0/A:col10/1732617278620/Put/seqid=0 2024-11-26T10:34:39,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742129_1305 (size=12949) 2024-11-26T10:34:39,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742128_1304 (size=12949) 2024-11-26T10:34:40,010 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:40,010 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-26T10:34:40,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:40,010 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2837): Flushing 0dac262a6c43b2828c5201e254d47204 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-26T10:34:40,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=A 2024-11-26T10:34:40,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:40,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=B 2024-11-26T10:34:40,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:40,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=C 2024-11-26T10:34:40,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:40,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/7376766a62fc469ba2dd08b8bf951f44 is 50, key is test_row_0/A:col10/1732617278730/Put/seqid=0 2024-11-26T10:34:40,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742130_1306 
(size=9857) 2024-11-26T10:34:40,384 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/dc7a67674b254e74bfd501832067277e as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/dc7a67674b254e74bfd501832067277e 2024-11-26T10:34:40,388 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0dac262a6c43b2828c5201e254d47204/A of 0dac262a6c43b2828c5201e254d47204 into dc7a67674b254e74bfd501832067277e(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:34:40,388 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0dac262a6c43b2828c5201e254d47204: 2024-11-26T10:34:40,388 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204., storeName=0dac262a6c43b2828c5201e254d47204/A, priority=13, startTime=1732617279968; duration=0sec 2024-11-26T10:34:40,388 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:34:40,388 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0dac262a6c43b2828c5201e254d47204:A 2024-11-26T10:34:40,388 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:34:40,389 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:34:40,389 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): 0dac262a6c43b2828c5201e254d47204/C is initiating minor compaction (all files) 2024-11-26T10:34:40,389 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0dac262a6c43b2828c5201e254d47204/C in TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 
2024-11-26T10:34:40,389 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/ea788f6a0ce04615b5e38276e2a85b20, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/0401d07b2f0e4e37913aa52104fe6f27, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/790431b298b1400aaed2913700b63559] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp, totalSize=36.3 K 2024-11-26T10:34:40,389 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting ea788f6a0ce04615b5e38276e2a85b20, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1732617275361 2024-11-26T10:34:40,390 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0401d07b2f0e4e37913aa52104fe6f27, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1732617276480 2024-11-26T10:34:40,390 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 790431b298b1400aaed2913700b63559, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1732617278620 2024-11-26T10:34:40,396 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0dac262a6c43b2828c5201e254d47204#C#compaction#263 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:34:40,396 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/4641a4d374af423da2353d9f5708f71b is 50, key is test_row_0/C:col10/1732617278620/Put/seqid=0 2024-11-26T10:34:40,398 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/a245fa5796824432bcce7bdafc857c3a as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/a245fa5796824432bcce7bdafc857c3a 2024-11-26T10:34:40,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742131_1307 (size=12949) 2024-11-26T10:34:40,402 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0dac262a6c43b2828c5201e254d47204/B of 0dac262a6c43b2828c5201e254d47204 into a245fa5796824432bcce7bdafc857c3a(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-26T10:34:40,402 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0dac262a6c43b2828c5201e254d47204: 2024-11-26T10:34:40,403 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204., storeName=0dac262a6c43b2828c5201e254d47204/B, priority=13, startTime=1732617279968; duration=0sec 2024-11-26T10:34:40,403 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:34:40,403 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0dac262a6c43b2828c5201e254d47204:B 2024-11-26T10:34:40,406 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/4641a4d374af423da2353d9f5708f71b as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/4641a4d374af423da2353d9f5708f71b 2024-11-26T10:34:40,409 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0dac262a6c43b2828c5201e254d47204/C of 0dac262a6c43b2828c5201e254d47204 into 4641a4d374af423da2353d9f5708f71b(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:34:40,409 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0dac262a6c43b2828c5201e254d47204: 2024-11-26T10:34:40,409 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204., storeName=0dac262a6c43b2828c5201e254d47204/C, priority=13, startTime=1732617279968; duration=0sec 2024-11-26T10:34:40,409 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:34:40,409 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0dac262a6c43b2828c5201e254d47204:C 2024-11-26T10:34:40,417 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/7376766a62fc469ba2dd08b8bf951f44 2024-11-26T10:34:40,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/ede7f0fb49ef4ebcabe280ae07ce8d2c is 50, key is test_row_0/B:col10/1732617278730/Put/seqid=0 2024-11-26T10:34:40,427 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742132_1308 (size=9857) 2024-11-26T10:34:40,827 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/ede7f0fb49ef4ebcabe280ae07ce8d2c 2024-11-26T10:34:40,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/14ec826a584347a69c1ff508ad385ee0 is 50, key is test_row_0/C:col10/1732617278730/Put/seqid=0 2024-11-26T10:34:40,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742133_1309 (size=9857) 2024-11-26T10:34:40,837 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/14ec826a584347a69c1ff508ad385ee0 2024-11-26T10:34:40,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/7376766a62fc469ba2dd08b8bf951f44 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/7376766a62fc469ba2dd08b8bf951f44 2024-11-26T10:34:40,844 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/7376766a62fc469ba2dd08b8bf951f44, entries=100, sequenceid=291, filesize=9.6 K 2024-11-26T10:34:40,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/ede7f0fb49ef4ebcabe280ae07ce8d2c as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/ede7f0fb49ef4ebcabe280ae07ce8d2c 2024-11-26T10:34:40,849 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/ede7f0fb49ef4ebcabe280ae07ce8d2c, entries=100, sequenceid=291, filesize=9.6 K 2024-11-26T10:34:40,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, 
pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/14ec826a584347a69c1ff508ad385ee0 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/14ec826a584347a69c1ff508ad385ee0 2024-11-26T10:34:40,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 0dac262a6c43b2828c5201e254d47204 2024-11-26T10:34:40,852 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. as already flushing 2024-11-26T10:34:40,854 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/14ec826a584347a69c1ff508ad385ee0, entries=100, sequenceid=291, filesize=9.6 K 2024-11-26T10:34:40,855 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=46.96 KB/48090 for 0dac262a6c43b2828c5201e254d47204 in 845ms, sequenceid=291, compaction requested=false 2024-11-26T10:34:40,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2538): Flush status journal for 0dac262a6c43b2828c5201e254d47204: 2024-11-26T10:34:40,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 
2024-11-26T10:34:40,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=86 2024-11-26T10:34:40,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 0dac262a6c43b2828c5201e254d47204 2024-11-26T10:34:40,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4106): Remote procedure done, pid=86 2024-11-26T10:34:40,855 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0dac262a6c43b2828c5201e254d47204 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-26T10:34:40,856 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=A 2024-11-26T10:34:40,856 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:40,856 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=B 2024-11-26T10:34:40,856 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:40,856 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=C 2024-11-26T10:34:40,856 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:40,857 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=86, resume processing ppid=85 2024-11-26T10:34:40,857 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, ppid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0630 sec 2024-11-26T10:34:40,858 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees in 2.0660 sec 2024-11-26T10:34:40,859 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/85f1c3e679ea4223bd765449c18d33f8 is 50, key is test_row_0/A:col10/1732617280854/Put/seqid=0 2024-11-26T10:34:40,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742134_1310 (size=12301) 2024-11-26T10:34:40,878 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:40,878 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:40,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617340874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:40,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617340874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:40,879 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:40,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617340875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:40,881 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:40,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617340878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:40,882 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:40,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60836 deadline: 1732617340878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:40,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-26T10:34:40,896 INFO [Thread-1149 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 85 completed 2024-11-26T10:34:40,897 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-26T10:34:40,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees 2024-11-26T10:34:40,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-26T10:34:40,898 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-26T10:34:40,898 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-26T10:34:40,898 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=88, ppid=87, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-26T10:34:40,981 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to 
exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:40,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617340979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:40,981 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:40,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617340979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:40,981 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:40,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617340979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:40,983 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:40,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617340982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:40,983 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:40,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60836 deadline: 1732617340982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:40,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-26T10:34:41,049 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:41,050 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-26T10:34:41,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:41,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. as already flushing 2024-11-26T10:34:41,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:41,050 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:34:41,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:41,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:41,182 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:41,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617341182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:41,183 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:41,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617341182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:41,183 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:41,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617341182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:41,185 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:41,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617341184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:41,186 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:41,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60836 deadline: 1732617341185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:41,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-26T10:34:41,202 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:41,203 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-26T10:34:41,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:41,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. as already flushing 2024-11-26T10:34:41,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:41,203 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:34:41,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:41,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:41,263 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=305 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/85f1c3e679ea4223bd765449c18d33f8 2024-11-26T10:34:41,275 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/1b4812e0862f4dcbb718d19aca3dcd96 is 50, key is test_row_0/B:col10/1732617280854/Put/seqid=0 2024-11-26T10:34:41,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742135_1311 (size=12301) 2024-11-26T10:34:41,355 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:41,355 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-26T10:34:41,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:41,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. as already flushing 2024-11-26T10:34:41,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:41,355 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:34:41,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:41,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:41,484 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:41,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617341483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:41,486 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:41,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617341484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:41,487 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:41,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617341486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:41,487 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:41,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617341486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:41,490 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:41,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60836 deadline: 1732617341488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:41,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-26T10:34:41,507 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:41,507 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-26T10:34:41,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:41,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. as already flushing 2024-11-26T10:34:41,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:41,508 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
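The repeated RegionTooBusyException warnings above come from HRegion.checkResources(), which rejects writes once the region's memstore grows past its blocking threshold: the configured memstore flush size multiplied by hbase.hregion.memstore.block.multiplier. Below is a minimal configuration sketch consistent with the 512.0 K limit reported in these warnings; the 128 KB flush size is an assumption (only the 512 K product is visible in the log), and the multiplier of 4 is the HBase default.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Hedged sketch: a configuration consistent with the "Over memstore limit=512.0 K"
// warnings above. The 128 KB flush size is assumed; only the 512 K product appears in the log.
public class MemstoreLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // assumed test value
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // HBase default
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("blocking limit = " + (blockingLimit / 1024) + " K"); // prints 512 K
    }
}

Because checkResources() throws rather than queues, each blocked Mutate above is answered within a few milliseconds and the client is left to retry.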
2024-11-26T10:34:41,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:41,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:41,659 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:41,660 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-26T10:34:41,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:41,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. as already flushing 2024-11-26T10:34:41,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:41,660 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:41,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:41,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
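In the exchange above, the master keeps re-dispatching the FlushRegionCallable for pid=88, the region server declines it ("NOT flushing ... as already flushing") while the MemStoreFlusher-initiated flush is still running, and the failure is reported back until a later attempt can proceed. For reference, a hedged sketch of how the same table flush would be requested from client code through the Admin API; the retries seen in the log all happen on the server side of this single call, and the exact blocking behaviour of Admin.flush varies by HBase version.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Hedged sketch: triggering the table flush that the master's flush procedure is driving.
public class FlushRequestSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}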
2024-11-26T10:34:41,680 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=305 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/1b4812e0862f4dcbb718d19aca3dcd96 2024-11-26T10:34:41,686 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/7e0db07cd28c413b8dee2e5cc10f93ab is 50, key is test_row_0/C:col10/1732617280854/Put/seqid=0 2024-11-26T10:34:41,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742136_1312 (size=12301) 2024-11-26T10:34:41,708 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=305 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/7e0db07cd28c413b8dee2e5cc10f93ab 2024-11-26T10:34:41,712 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/85f1c3e679ea4223bd765449c18d33f8 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/85f1c3e679ea4223bd765449c18d33f8 2024-11-26T10:34:41,716 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/85f1c3e679ea4223bd765449c18d33f8, entries=150, sequenceid=305, filesize=12.0 K 2024-11-26T10:34:41,717 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/1b4812e0862f4dcbb718d19aca3dcd96 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/1b4812e0862f4dcbb718d19aca3dcd96 2024-11-26T10:34:41,721 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/1b4812e0862f4dcbb718d19aca3dcd96, entries=150, sequenceid=305, filesize=12.0 K 2024-11-26T10:34:41,721 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/7e0db07cd28c413b8dee2e5cc10f93ab as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/7e0db07cd28c413b8dee2e5cc10f93ab 2024-11-26T10:34:41,726 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/7e0db07cd28c413b8dee2e5cc10f93ab, entries=150, sequenceid=305, filesize=12.0 K 2024-11-26T10:34:41,727 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 0dac262a6c43b2828c5201e254d47204 in 872ms, sequenceid=305, compaction requested=true 2024-11-26T10:34:41,727 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0dac262a6c43b2828c5201e254d47204: 2024-11-26T10:34:41,728 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0dac262a6c43b2828c5201e254d47204:A, priority=-2147483648, current under compaction store size is 1 2024-11-26T10:34:41,728 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:34:41,728 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:34:41,728 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0dac262a6c43b2828c5201e254d47204:B, priority=-2147483648, current under compaction store size is 2 2024-11-26T10:34:41,728 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:34:41,728 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:34:41,728 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0dac262a6c43b2828c5201e254d47204:C, priority=-2147483648, current under compaction store size is 3 2024-11-26T10:34:41,728 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:34:41,729 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35107 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:34:41,729 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): 0dac262a6c43b2828c5201e254d47204/A is initiating minor compaction (all files) 2024-11-26T10:34:41,729 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0dac262a6c43b2828c5201e254d47204/A in TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 
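The flush that completes above sums to the figures reported in the log: three stores (A, B and C) of ~17.89 KB each give the ~53.67 KB dataSize, and the raw byte counts divide out exactly. A small arithmetic check, with all byte counts copied from the log:

// Arithmetic check of the flush summary; byte counts are copied from the log lines above.
public class FlushArithmetic {
    public static void main(String[] args) {
        long dataBytes = 54_960;   // "dataSize ~53.67 KB/54960"
        long heapBytes = 144_720;  // "heapSize ~141.33 KB/144720"
        int stores = 3;            // column families A, B and C
        System.out.printf("data      = %.2f KB%n", dataBytes / 1024.0);          // 53.67 KB
        System.out.printf("per store = %.2f KB%n", dataBytes / 1024.0 / stores); // 17.89 KB
        System.out.printf("heap      = %.2f KB%n", heapBytes / 1024.0);          // 141.33 KB
    }
}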
2024-11-26T10:34:41,729 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/dc7a67674b254e74bfd501832067277e, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/7376766a62fc469ba2dd08b8bf951f44, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/85f1c3e679ea4223bd765449c18d33f8] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp, totalSize=34.3 K 2024-11-26T10:34:41,729 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35107 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:34:41,729 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): 0dac262a6c43b2828c5201e254d47204/B is initiating minor compaction (all files) 2024-11-26T10:34:41,729 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0dac262a6c43b2828c5201e254d47204/B in TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:41,729 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/a245fa5796824432bcce7bdafc857c3a, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/ede7f0fb49ef4ebcabe280ae07ce8d2c, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/1b4812e0862f4dcbb718d19aca3dcd96] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp, totalSize=34.3 K 2024-11-26T10:34:41,729 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting dc7a67674b254e74bfd501832067277e, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1732617278620 2024-11-26T10:34:41,729 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting a245fa5796824432bcce7bdafc857c3a, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1732617278620 2024-11-26T10:34:41,730 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7376766a62fc469ba2dd08b8bf951f44, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732617278730 2024-11-26T10:34:41,730 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting ede7f0fb49ef4ebcabe280ae07ce8d2c, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732617278730 2024-11-26T10:34:41,730 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 85f1c3e679ea4223bd765449c18d33f8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=305, earliestPutTs=1732617280854 2024-11-26T10:34:41,730 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 1b4812e0862f4dcbb718d19aca3dcd96, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=305, earliestPutTs=1732617280854 2024-11-26T10:34:41,736 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0dac262a6c43b2828c5201e254d47204#B#compaction#269 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:34:41,737 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0dac262a6c43b2828c5201e254d47204#A#compaction#270 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:34:41,737 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/5a5fb4d46b0949709d066cdfdbed9c12 is 50, key is test_row_0/A:col10/1732617280854/Put/seqid=0 2024-11-26T10:34:41,737 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/33b5ea29da7c42cab020bfff07966751 is 50, key is test_row_0/B:col10/1732617280854/Put/seqid=0 2024-11-26T10:34:41,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742138_1314 (size=13051) 2024-11-26T10:34:41,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742137_1313 (size=13051) 2024-11-26T10:34:41,811 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:41,812 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-26T10:34:41,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 
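Both compaction threads above pick all three eligible store files (~12.6 K + ~9.6 K + ~12.0 K, 35107 bytes ≈ 34.3 K) for a minor compaction. The exploring policy only accepts a candidate set in which every file fits the configured size ratio against the rest of the selection; below is a minimal sketch of that rule, not the actual HBase implementation, using the default hbase.hstore.compaction.ratio of 1.2 and approximate per-file byte counts taken from the sizes logged above.

// Hedged sketch of the "files in ratio" rule behind the exploring compaction selection:
// every candidate file must be no larger than ratio * (combined size of the other files).
public class CompactionRatioSketch {
    static boolean filesInRatio(long[] sizes, double ratio) {
        long total = 0;
        for (long s : sizes) total += s;
        for (long s : sizes) {
            if (s > (total - s) * ratio) return false;
        }
        return true;
    }

    public static void main(String[] args) {
        long[] sizes = {12_902, 9_830, 12_301};          // ~12.6 K, ~9.6 K, ~12.0 K (approximate)
        double ratio = 1.2;                              // hbase.hstore.compaction.ratio default
        System.out.println(filesInRatio(sizes, ratio));  // true: the whole set is eligible
    }
}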
2024-11-26T10:34:41,812 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2837): Flushing 0dac262a6c43b2828c5201e254d47204 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-26T10:34:41,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=A 2024-11-26T10:34:41,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:41,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=B 2024-11-26T10:34:41,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:41,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=C 2024-11-26T10:34:41,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:41,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/48fb20ffb697459b98f5d78698c2451b is 50, key is test_row_0/A:col10/1732617280877/Put/seqid=0 2024-11-26T10:34:41,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742139_1315 (size=12301) 2024-11-26T10:34:41,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 0dac262a6c43b2828c5201e254d47204 2024-11-26T10:34:41,990 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. as already flushing 2024-11-26T10:34:41,995 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:41,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617341993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:41,996 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:41,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617341993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:41,996 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:41,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60836 deadline: 1732617341994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:41,996 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:41,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617341994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:41,996 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:41,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617341994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:42,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-26T10:34:42,098 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:42,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617342096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:42,098 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:42,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617342097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:42,099 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:42,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617342097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:42,099 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:42,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617342097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:42,157 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/5a5fb4d46b0949709d066cdfdbed9c12 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/5a5fb4d46b0949709d066cdfdbed9c12 2024-11-26T10:34:42,161 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0dac262a6c43b2828c5201e254d47204/A of 0dac262a6c43b2828c5201e254d47204 into 5a5fb4d46b0949709d066cdfdbed9c12(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
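While the flush and compactions run, writers keep being rejected with RegionTooBusyException. The standard HBase client normally absorbs these rejections by retrying the mutation with backoff (governed by hbase.client.retries.number and hbase.client.pause); the explicit loop below is only a hedged illustration of that pattern, assuming the server-side exception is allowed to surface to the caller. Row, family and qualifier mirror the test's naming (test_row_0, family A, column col10) but are illustrative here.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Hedged sketch: retrying a Put rejected while the region's memstore is over its blocking limit.
public class BusyRegionRetrySketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);
                    break;                          // write accepted
                } catch (RegionTooBusyException busy) {
                    if (attempt >= 10) throw busy;  // give up after a bounded number of tries
                    Thread.sleep(100L * attempt);   // simple linear backoff between attempts
                }
            }
        }
    }
}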
2024-11-26T10:34:42,161 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0dac262a6c43b2828c5201e254d47204: 2024-11-26T10:34:42,161 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204., storeName=0dac262a6c43b2828c5201e254d47204/A, priority=13, startTime=1732617281728; duration=0sec 2024-11-26T10:34:42,161 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:34:42,161 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0dac262a6c43b2828c5201e254d47204:A 2024-11-26T10:34:42,161 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:34:42,162 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35107 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:34:42,162 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): 0dac262a6c43b2828c5201e254d47204/C is initiating minor compaction (all files) 2024-11-26T10:34:42,162 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0dac262a6c43b2828c5201e254d47204/C in TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:42,163 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/4641a4d374af423da2353d9f5708f71b, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/14ec826a584347a69c1ff508ad385ee0, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/7e0db07cd28c413b8dee2e5cc10f93ab] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp, totalSize=34.3 K 2024-11-26T10:34:42,163 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4641a4d374af423da2353d9f5708f71b, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1732617278620 2024-11-26T10:34:42,163 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 14ec826a584347a69c1ff508ad385ee0, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732617278730 2024-11-26T10:34:42,163 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7e0db07cd28c413b8dee2e5cc10f93ab, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=305, earliestPutTs=1732617280854 2024-11-26T10:34:42,169 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 0dac262a6c43b2828c5201e254d47204#C#compaction#272 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:34:42,169 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/1cd35fe98da841ff9cb8970a6908a51e is 50, key is test_row_0/C:col10/1732617280854/Put/seqid=0 2024-11-26T10:34:42,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742140_1316 (size=13051) 2024-11-26T10:34:42,172 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/33b5ea29da7c42cab020bfff07966751 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/33b5ea29da7c42cab020bfff07966751 2024-11-26T10:34:42,177 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0dac262a6c43b2828c5201e254d47204/B of 0dac262a6c43b2828c5201e254d47204 into 33b5ea29da7c42cab020bfff07966751(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:34:42,177 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0dac262a6c43b2828c5201e254d47204: 2024-11-26T10:34:42,177 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204., storeName=0dac262a6c43b2828c5201e254d47204/B, priority=13, startTime=1732617281728; duration=0sec 2024-11-26T10:34:42,177 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:34:42,177 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0dac262a6c43b2828c5201e254d47204:B 2024-11-26T10:34:42,224 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=331 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/48fb20ffb697459b98f5d78698c2451b 2024-11-26T10:34:42,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/6c7230a608434a70aec2aeee490d07fc is 50, key is test_row_0/B:col10/1732617280877/Put/seqid=0 2024-11-26T10:34:42,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to 
blk_1073742141_1317 (size=12301) 2024-11-26T10:34:42,301 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:42,301 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:42,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617342300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:42,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617342300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:42,301 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:42,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617342300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:42,301 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:42,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617342300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:42,576 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/1cd35fe98da841ff9cb8970a6908a51e as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/1cd35fe98da841ff9cb8970a6908a51e 2024-11-26T10:34:42,588 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0dac262a6c43b2828c5201e254d47204/C of 0dac262a6c43b2828c5201e254d47204 into 1cd35fe98da841ff9cb8970a6908a51e(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
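Aside: the repeated RegionTooBusyException stack traces in this part of the log all show the same write path: handler threads reject Mutate calls in HRegion.checkResources because the region is over its memstore blocking limit ("Over memstore limit=512.0 K"), and the rejection surfaces to the client through RSRpcServices.put/mutate and the RPC layer. Below is a minimal, hedged sketch of how application code could react if that exception reaches it; the table, row and column names are taken from the log, while the retry count and backoff values are illustrative assumptions (the stock HBase client also performs its own internal retries before giving up).

```java
// Hedged sketch: backing off when a put is rejected with RegionTooBusyException
// ("Over memstore limit", as in the surrounding log). Retry/backoff values are
// illustrative assumptions, not values used by the TestAcidGuarantees run.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;                // assumed starting backoff
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);              // may be rejected while the region is blocked
                    break;                       // write accepted
                } catch (RegionTooBusyException e) {
                    // Region memstore is over its blocking limit; give the flush and
                    // compactions seen in the log time to drain it, then retry.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;              // simple exponential backoff
                }
            }
        }
    }
}
```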
2024-11-26T10:34:42,589 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0dac262a6c43b2828c5201e254d47204: 2024-11-26T10:34:42,589 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204., storeName=0dac262a6c43b2828c5201e254d47204/C, priority=13, startTime=1732617281728; duration=0sec 2024-11-26T10:34:42,589 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:34:42,589 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0dac262a6c43b2828c5201e254d47204:C 2024-11-26T10:34:42,603 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:42,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617342602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:42,604 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:42,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617342603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:42,604 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:42,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617342603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:42,605 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:42,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617342604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:42,633 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=331 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/6c7230a608434a70aec2aeee490d07fc 2024-11-26T10:34:42,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/dc79f16df7334dffa2a6e59900ed31fa is 50, key is test_row_0/C:col10/1732617280877/Put/seqid=0 2024-11-26T10:34:42,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742142_1318 (size=12301) 2024-11-26T10:34:43,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-26T10:34:43,006 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:43,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60836 deadline: 1732617343005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:43,048 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=331 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/dc79f16df7334dffa2a6e59900ed31fa 2024-11-26T10:34:43,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/48fb20ffb697459b98f5d78698c2451b as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/48fb20ffb697459b98f5d78698c2451b 2024-11-26T10:34:43,054 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/48fb20ffb697459b98f5d78698c2451b, entries=150, sequenceid=331, filesize=12.0 K 2024-11-26T10:34:43,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/6c7230a608434a70aec2aeee490d07fc as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/6c7230a608434a70aec2aeee490d07fc 2024-11-26T10:34:43,059 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/6c7230a608434a70aec2aeee490d07fc, entries=150, sequenceid=331, filesize=12.0 K 2024-11-26T10:34:43,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/dc79f16df7334dffa2a6e59900ed31fa as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/dc79f16df7334dffa2a6e59900ed31fa 2024-11-26T10:34:43,062 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/dc79f16df7334dffa2a6e59900ed31fa, entries=150, sequenceid=331, filesize=12.0 K 2024-11-26T10:34:43,063 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 0dac262a6c43b2828c5201e254d47204 in 1251ms, sequenceid=331, compaction requested=false 2024-11-26T10:34:43,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2538): Flush status journal for 0dac262a6c43b2828c5201e254d47204: 2024-11-26T10:34:43,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:43,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=88 2024-11-26T10:34:43,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4106): Remote procedure done, pid=88 2024-11-26T10:34:43,065 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=88, resume processing ppid=87 2024-11-26T10:34:43,065 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, ppid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1660 sec 2024-11-26T10:34:43,066 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees in 2.1690 sec 2024-11-26T10:34:43,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 0dac262a6c43b2828c5201e254d47204 2024-11-26T10:34:43,106 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0dac262a6c43b2828c5201e254d47204 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-26T10:34:43,106 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=A 2024-11-26T10:34:43,106 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:43,106 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=B 2024-11-26T10:34:43,106 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:43,106 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
0dac262a6c43b2828c5201e254d47204, store=C 2024-11-26T10:34:43,106 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:43,110 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/3bbe9c9173514c058db4c8b89c34ca41 is 50, key is test_row_0/A:col10/1732617283105/Put/seqid=0 2024-11-26T10:34:43,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742143_1319 (size=12297) 2024-11-26T10:34:43,115 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=345 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/3bbe9c9173514c058db4c8b89c34ca41 2024-11-26T10:34:43,120 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/932e7290d77d49c5a2867107c00a38d3 is 50, key is test_row_0/B:col10/1732617283105/Put/seqid=0 2024-11-26T10:34:43,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742144_1320 (size=9857) 2024-11-26T10:34:43,156 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:43,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617343153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:43,156 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:43,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617343153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:43,157 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:43,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617343156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:43,158 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:43,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617343156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:43,259 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:43,259 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:43,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617343257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:43,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617343257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:43,260 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:43,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617343258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:43,260 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:43,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617343258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:43,462 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:43,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617343461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:43,462 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:43,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617343461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:43,462 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:43,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617343462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:43,463 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:43,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617343462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:43,528 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=345 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/932e7290d77d49c5a2867107c00a38d3 2024-11-26T10:34:43,534 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/3a3d5779179249ca80800ca62f17bcd3 is 50, key is test_row_0/C:col10/1732617283105/Put/seqid=0 2024-11-26T10:34:43,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742145_1321 (size=9857) 2024-11-26T10:34:43,765 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:43,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617343764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:43,765 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:43,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617343764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:43,765 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:43,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617343764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:43,767 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:43,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617343766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:43,941 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=345 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/3a3d5779179249ca80800ca62f17bcd3 2024-11-26T10:34:43,944 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/3bbe9c9173514c058db4c8b89c34ca41 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/3bbe9c9173514c058db4c8b89c34ca41 2024-11-26T10:34:43,947 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/3bbe9c9173514c058db4c8b89c34ca41, entries=150, sequenceid=345, filesize=12.0 K 2024-11-26T10:34:43,948 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/932e7290d77d49c5a2867107c00a38d3 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/932e7290d77d49c5a2867107c00a38d3 2024-11-26T10:34:43,951 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/932e7290d77d49c5a2867107c00a38d3, entries=100, sequenceid=345, filesize=9.6 K 2024-11-26T10:34:43,952 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/3a3d5779179249ca80800ca62f17bcd3 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/3a3d5779179249ca80800ca62f17bcd3 2024-11-26T10:34:43,955 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/3a3d5779179249ca80800ca62f17bcd3, entries=100, sequenceid=345, filesize=9.6 K 2024-11-26T10:34:43,955 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 0dac262a6c43b2828c5201e254d47204 in 849ms, sequenceid=345, compaction requested=true 2024-11-26T10:34:43,955 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0dac262a6c43b2828c5201e254d47204: 2024-11-26T10:34:43,955 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0dac262a6c43b2828c5201e254d47204:A, priority=-2147483648, current under compaction store size is 1 2024-11-26T10:34:43,955 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:34:43,955 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:34:43,956 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0dac262a6c43b2828c5201e254d47204:B, priority=-2147483648, current under compaction store size is 2 2024-11-26T10:34:43,956 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:34:43,956 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:34:43,956 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0dac262a6c43b2828c5201e254d47204:C, priority=-2147483648, current under compaction store size is 3 2024-11-26T10:34:43,956 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:34:43,956 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35209 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:34:43,956 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37649 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:34:43,956 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): 0dac262a6c43b2828c5201e254d47204/A is initiating minor compaction (all files) 2024-11-26T10:34:43,956 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): 0dac262a6c43b2828c5201e254d47204/B is initiating minor compaction (all files) 2024-11-26T10:34:43,956 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0dac262a6c43b2828c5201e254d47204/A in TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 
2024-11-26T10:34:43,956 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0dac262a6c43b2828c5201e254d47204/B in TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:43,957 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/33b5ea29da7c42cab020bfff07966751, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/6c7230a608434a70aec2aeee490d07fc, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/932e7290d77d49c5a2867107c00a38d3] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp, totalSize=34.4 K 2024-11-26T10:34:43,957 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/5a5fb4d46b0949709d066cdfdbed9c12, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/48fb20ffb697459b98f5d78698c2451b, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/3bbe9c9173514c058db4c8b89c34ca41] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp, totalSize=36.8 K 2024-11-26T10:34:43,957 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 33b5ea29da7c42cab020bfff07966751, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=305, earliestPutTs=1732617280854 2024-11-26T10:34:43,957 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5a5fb4d46b0949709d066cdfdbed9c12, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=305, earliestPutTs=1732617280854 2024-11-26T10:34:43,957 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 6c7230a608434a70aec2aeee490d07fc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1732617280873 2024-11-26T10:34:43,957 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 48fb20ffb697459b98f5d78698c2451b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1732617280873 2024-11-26T10:34:43,958 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 932e7290d77d49c5a2867107c00a38d3, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=345, earliestPutTs=1732617281993 2024-11-26T10:34:43,958 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3bbe9c9173514c058db4c8b89c34ca41, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=345, earliestPutTs=1732617281992 
2024-11-26T10:34:43,964 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0dac262a6c43b2828c5201e254d47204#B#compaction#278 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:34:43,964 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0dac262a6c43b2828c5201e254d47204#A#compaction#279 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:34:43,965 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/8ecd631d4d7d4438bd96997d7eb0d157 is 50, key is test_row_0/A:col10/1732617283105/Put/seqid=0 2024-11-26T10:34:43,965 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/c8f40e4fa4414fc09c7b420038c2cf58 is 50, key is test_row_0/B:col10/1732617283105/Put/seqid=0 2024-11-26T10:34:43,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742146_1322 (size=13153) 2024-11-26T10:34:43,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742147_1323 (size=13153) 2024-11-26T10:34:44,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 0dac262a6c43b2828c5201e254d47204 2024-11-26T10:34:44,268 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0dac262a6c43b2828c5201e254d47204 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-26T10:34:44,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=A 2024-11-26T10:34:44,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:44,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=B 2024-11-26T10:34:44,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:44,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=C 2024-11-26T10:34:44,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:44,272 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/a221dba4f1c742b68cc0aca857738d9f is 50, key is test_row_0/A:col10/1732617283155/Put/seqid=0 2024-11-26T10:34:44,274 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:44,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60804 deadline: 1732617344272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:44,275 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:44,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617344272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:44,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742148_1324 (size=14741) 2024-11-26T10:34:44,275 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:44,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617344272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:44,277 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:44,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617344275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:44,377 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:44,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617344376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:44,377 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:44,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617344376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:44,379 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:44,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617344378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:44,393 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/c8f40e4fa4414fc09c7b420038c2cf58 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/c8f40e4fa4414fc09c7b420038c2cf58 2024-11-26T10:34:44,393 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/8ecd631d4d7d4438bd96997d7eb0d157 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/8ecd631d4d7d4438bd96997d7eb0d157 2024-11-26T10:34:44,398 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0dac262a6c43b2828c5201e254d47204/A of 0dac262a6c43b2828c5201e254d47204 into 8ecd631d4d7d4438bd96997d7eb0d157(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:34:44,398 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0dac262a6c43b2828c5201e254d47204/B of 0dac262a6c43b2828c5201e254d47204 into c8f40e4fa4414fc09c7b420038c2cf58(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-26T10:34:44,398 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0dac262a6c43b2828c5201e254d47204: 2024-11-26T10:34:44,398 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0dac262a6c43b2828c5201e254d47204: 2024-11-26T10:34:44,398 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204., storeName=0dac262a6c43b2828c5201e254d47204/B, priority=13, startTime=1732617283955; duration=0sec 2024-11-26T10:34:44,398 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204., storeName=0dac262a6c43b2828c5201e254d47204/A, priority=13, startTime=1732617283955; duration=0sec 2024-11-26T10:34:44,398 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:34:44,398 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0dac262a6c43b2828c5201e254d47204:B 2024-11-26T10:34:44,398 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:34:44,398 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0dac262a6c43b2828c5201e254d47204:A 2024-11-26T10:34:44,398 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:34:44,399 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35209 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:34:44,399 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): 0dac262a6c43b2828c5201e254d47204/C is initiating minor compaction (all files) 2024-11-26T10:34:44,399 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0dac262a6c43b2828c5201e254d47204/C in TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 
2024-11-26T10:34:44,399 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/1cd35fe98da841ff9cb8970a6908a51e, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/dc79f16df7334dffa2a6e59900ed31fa, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/3a3d5779179249ca80800ca62f17bcd3] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp, totalSize=34.4 K 2024-11-26T10:34:44,399 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 1cd35fe98da841ff9cb8970a6908a51e, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=305, earliestPutTs=1732617280854 2024-11-26T10:34:44,400 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting dc79f16df7334dffa2a6e59900ed31fa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1732617280873 2024-11-26T10:34:44,400 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 3a3d5779179249ca80800ca62f17bcd3, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=345, earliestPutTs=1732617281993 2024-11-26T10:34:44,405 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0dac262a6c43b2828c5201e254d47204#C#compaction#281 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:34:44,405 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/0c7aa6913db9483d80104d3af74de592 is 50, key is test_row_0/C:col10/1732617283105/Put/seqid=0 2024-11-26T10:34:44,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742149_1325 (size=13153) 2024-11-26T10:34:44,579 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:44,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617344578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:44,580 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:44,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617344579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:44,581 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:44,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617344580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:44,676 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=373 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/a221dba4f1c742b68cc0aca857738d9f 2024-11-26T10:34:44,682 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/dbaeeb9c1c9b480c8a3650e91b9a47c6 is 50, key is test_row_0/B:col10/1732617283155/Put/seqid=0 2024-11-26T10:34:44,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742150_1326 (size=12301) 2024-11-26T10:34:44,685 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=373 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/dbaeeb9c1c9b480c8a3650e91b9a47c6 2024-11-26T10:34:44,691 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/e8d9e2b87d154f4e9f300ba1402c51db is 50, key is test_row_0/C:col10/1732617283155/Put/seqid=0 2024-11-26T10:34:44,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742151_1327 (size=12301) 2024-11-26T10:34:44,812 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/0c7aa6913db9483d80104d3af74de592 as 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/0c7aa6913db9483d80104d3af74de592 2024-11-26T10:34:44,816 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0dac262a6c43b2828c5201e254d47204/C of 0dac262a6c43b2828c5201e254d47204 into 0c7aa6913db9483d80104d3af74de592(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:34:44,816 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0dac262a6c43b2828c5201e254d47204: 2024-11-26T10:34:44,816 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204., storeName=0dac262a6c43b2828c5201e254d47204/C, priority=13, startTime=1732617283956; duration=0sec 2024-11-26T10:34:44,816 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:34:44,816 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0dac262a6c43b2828c5201e254d47204:C 2024-11-26T10:34:44,881 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:44,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60788 deadline: 1732617344880, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:44,881 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:44,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60746 deadline: 1732617344881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:44,885 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:44,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60758 deadline: 1732617344884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:45,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-26T10:34:45,002 INFO [Thread-1149 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 87 completed 2024-11-26T10:34:45,003 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-26T10:34:45,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=89, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees 2024-11-26T10:34:45,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-26T10:34:45,004 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=89, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-26T10:34:45,004 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=89, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-26T10:34:45,004 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-26T10:34:45,019 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:45,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60836 deadline: 1732617345018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:45,020 DEBUG [Thread-1145 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4141 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204., hostname=ccf62758a0a5,45419,1732617185877, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-26T10:34:45,094 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=373 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/e8d9e2b87d154f4e9f300ba1402c51db 2024-11-26T10:34:45,098 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/a221dba4f1c742b68cc0aca857738d9f as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/a221dba4f1c742b68cc0aca857738d9f 2024-11-26T10:34:45,101 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/a221dba4f1c742b68cc0aca857738d9f, entries=200, sequenceid=373, filesize=14.4 K 2024-11-26T10:34:45,101 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/dbaeeb9c1c9b480c8a3650e91b9a47c6 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/dbaeeb9c1c9b480c8a3650e91b9a47c6 2024-11-26T10:34:45,104 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/dbaeeb9c1c9b480c8a3650e91b9a47c6, entries=150, sequenceid=373, filesize=12.0 K 2024-11-26T10:34:45,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-26T10:34:45,105 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/e8d9e2b87d154f4e9f300ba1402c51db as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/e8d9e2b87d154f4e9f300ba1402c51db 2024-11-26T10:34:45,108 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/e8d9e2b87d154f4e9f300ba1402c51db, entries=150, sequenceid=373, filesize=12.0 K 2024-11-26T10:34:45,108 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for 0dac262a6c43b2828c5201e254d47204 in 840ms, sequenceid=373, compaction requested=false 2024-11-26T10:34:45,108 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0dac262a6c43b2828c5201e254d47204: 2024-11-26T10:34:45,156 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:45,156 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-11-26T10:34:45,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 
2024-11-26T10:34:45,156 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2837): Flushing 0dac262a6c43b2828c5201e254d47204 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-26T10:34:45,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=A 2024-11-26T10:34:45,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:45,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=B 2024-11-26T10:34:45,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:45,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=C 2024-11-26T10:34:45,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:45,157 DEBUG [Thread-1156 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3c299cfb to 127.0.0.1:61934 2024-11-26T10:34:45,157 DEBUG [Thread-1156 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:34:45,157 DEBUG [Thread-1152 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x72537a47 to 127.0.0.1:61934 2024-11-26T10:34:45,157 DEBUG [Thread-1152 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:34:45,158 DEBUG [Thread-1154 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x036642cb to 127.0.0.1:61934 2024-11-26T10:34:45,158 DEBUG [Thread-1154 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:34:45,158 DEBUG [Thread-1158 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x605827c9 to 127.0.0.1:61934 2024-11-26T10:34:45,158 DEBUG [Thread-1158 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:34:45,159 DEBUG [Thread-1150 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x14ed1e44 to 127.0.0.1:61934 2024-11-26T10:34:45,159 DEBUG [Thread-1150 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:34:45,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/c6f24f2994744053a85a2039b34be9b3 is 50, key is test_row_0/A:col10/1732617284273/Put/seqid=0 2024-11-26T10:34:45,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742152_1328 (size=12301) 2024-11-26T10:34:45,279 DEBUG [Thread-1147 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2e9ae050 to 127.0.0.1:61934 2024-11-26T10:34:45,279 DEBUG [Thread-1147 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:34:45,306 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-26T10:34:45,387 DEBUG [Thread-1139 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3c1ac389 to 127.0.0.1:61934 2024-11-26T10:34:45,387 DEBUG [Thread-1139 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:34:45,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 0dac262a6c43b2828c5201e254d47204 2024-11-26T10:34:45,387 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. as already flushing 2024-11-26T10:34:45,388 DEBUG [Thread-1143 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7c480dfb to 127.0.0.1:61934 2024-11-26T10:34:45,388 DEBUG [Thread-1143 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:34:45,391 DEBUG [Thread-1141 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x028e73c0 to 127.0.0.1:61934 2024-11-26T10:34:45,391 DEBUG [Thread-1141 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:34:45,564 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=384 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/c6f24f2994744053a85a2039b34be9b3 2024-11-26T10:34:45,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/1f99809dabd047888e61e739b4b177c7 is 50, key is test_row_0/B:col10/1732617284273/Put/seqid=0 2024-11-26T10:34:45,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742153_1329 (size=12301) 2024-11-26T10:34:45,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-26T10:34:45,986 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=384 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/1f99809dabd047888e61e739b4b177c7 2024-11-26T10:34:45,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/0366dc07ce8b479b8d26eb95992bae82 is 50, key is test_row_0/C:col10/1732617284273/Put/seqid=0 2024-11-26T10:34:46,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742154_1330 (size=12301) 2024-11-26T10:34:46,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is 
done pid=89 2024-11-26T10:34:46,405 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=384 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/0366dc07ce8b479b8d26eb95992bae82 2024-11-26T10:34:46,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/c6f24f2994744053a85a2039b34be9b3 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/c6f24f2994744053a85a2039b34be9b3 2024-11-26T10:34:46,420 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/c6f24f2994744053a85a2039b34be9b3, entries=150, sequenceid=384, filesize=12.0 K 2024-11-26T10:34:46,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/1f99809dabd047888e61e739b4b177c7 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/1f99809dabd047888e61e739b4b177c7 2024-11-26T10:34:46,424 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/1f99809dabd047888e61e739b4b177c7, entries=150, sequenceid=384, filesize=12.0 K 2024-11-26T10:34:46,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/0366dc07ce8b479b8d26eb95992bae82 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/0366dc07ce8b479b8d26eb95992bae82 2024-11-26T10:34:46,428 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/0366dc07ce8b479b8d26eb95992bae82, entries=150, sequenceid=384, filesize=12.0 K 2024-11-26T10:34:46,428 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=26.84 KB/27480 for 0dac262a6c43b2828c5201e254d47204 in 1272ms, 
sequenceid=384, compaction requested=true 2024-11-26T10:34:46,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2538): Flush status journal for 0dac262a6c43b2828c5201e254d47204: 2024-11-26T10:34:46,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:46,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=90 2024-11-26T10:34:46,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4106): Remote procedure done, pid=90 2024-11-26T10:34:46,431 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=90, resume processing ppid=89 2024-11-26T10:34:46,431 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, ppid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4250 sec 2024-11-26T10:34:46,431 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees in 1.4280 sec 2024-11-26T10:34:47,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-26T10:34:47,112 INFO [Thread-1149 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 89 completed 2024-11-26T10:34:49,038 DEBUG [Thread-1145 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x34cb3991 to 127.0.0.1:61934 2024-11-26T10:34:49,038 DEBUG [Thread-1145 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:34:49,039 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-26T10:34:49,039 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 67 2024-11-26T10:34:49,039 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 61 2024-11-26T10:34:49,039 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 64 2024-11-26T10:34:49,039 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 47 2024-11-26T10:34:49,039 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 65 2024-11-26T10:34:49,039 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-26T10:34:49,039 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7591 2024-11-26T10:34:49,039 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7679 2024-11-26T10:34:49,039 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7367 2024-11-26T10:34:49,039 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7572 2024-11-26T10:34:49,039 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7705 2024-11-26T10:34:49,039 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-26T10:34:49,040 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-26T10:34:49,040 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x64dc42d9 to 127.0.0.1:61934 2024-11-26T10:34:49,040 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:34:49,041 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-26T10:34:49,042 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-26T10:34:49,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=91, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-26T10:34:49,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-26T10:34:49,046 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732617289046"}]},"ts":"1732617289046"} 2024-11-26T10:34:49,047 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-26T10:34:49,094 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-26T10:34:49,095 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=92, ppid=91, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-26T10:34:49,096 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=93, ppid=92, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=0dac262a6c43b2828c5201e254d47204, UNASSIGN}] 2024-11-26T10:34:49,097 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=93, ppid=92, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=0dac262a6c43b2828c5201e254d47204, UNASSIGN 2024-11-26T10:34:49,098 INFO [PEWorker-3 {}] 
assignment.RegionStateStore(202): pid=93 updating hbase:meta row=0dac262a6c43b2828c5201e254d47204, regionState=CLOSING, regionLocation=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:49,099 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-26T10:34:49,099 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=94, ppid=93, state=RUNNABLE; CloseRegionProcedure 0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877}] 2024-11-26T10:34:49,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-26T10:34:49,251 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:49,253 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(124): Close 0dac262a6c43b2828c5201e254d47204 2024-11-26T10:34:49,253 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-26T10:34:49,253 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1681): Closing 0dac262a6c43b2828c5201e254d47204, disabling compactions & flushes 2024-11-26T10:34:49,253 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:49,253 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:49,253 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. after waiting 0 ms 2024-11-26T10:34:49,253 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 
2024-11-26T10:34:49,254 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(2837): Flushing 0dac262a6c43b2828c5201e254d47204 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-26T10:34:49,254 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=A 2024-11-26T10:34:49,254 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:49,254 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=B 2024-11-26T10:34:49,255 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:49,255 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0dac262a6c43b2828c5201e254d47204, store=C 2024-11-26T10:34:49,255 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:49,265 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/5e6131fb91a34e5c8a56122f0db4dce5 is 50, key is test_row_0/A:col10/1732617285277/Put/seqid=0 2024-11-26T10:34:49,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742155_1331 (size=12301) 2024-11-26T10:34:49,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-26T10:34:49,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-26T10:34:49,670 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=392 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/5e6131fb91a34e5c8a56122f0db4dce5 2024-11-26T10:34:49,685 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/a13513b97f56449f918bab7b4d076ec5 is 50, key is test_row_0/B:col10/1732617285277/Put/seqid=0 2024-11-26T10:34:49,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742156_1332 (size=12301) 2024-11-26T10:34:50,089 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 
{event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=392 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/a13513b97f56449f918bab7b4d076ec5 2024-11-26T10:34:50,102 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/b4b09c45404c4bdc8a0cdd4fabf58d39 is 50, key is test_row_0/C:col10/1732617285277/Put/seqid=0 2024-11-26T10:34:50,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742157_1333 (size=12301) 2024-11-26T10:34:50,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-26T10:34:50,508 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=392 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/b4b09c45404c4bdc8a0cdd4fabf58d39 2024-11-26T10:34:50,519 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/A/5e6131fb91a34e5c8a56122f0db4dce5 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/5e6131fb91a34e5c8a56122f0db4dce5 2024-11-26T10:34:50,524 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/5e6131fb91a34e5c8a56122f0db4dce5, entries=150, sequenceid=392, filesize=12.0 K 2024-11-26T10:34:50,525 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/B/a13513b97f56449f918bab7b4d076ec5 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/a13513b97f56449f918bab7b4d076ec5 2024-11-26T10:34:50,529 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/a13513b97f56449f918bab7b4d076ec5, entries=150, sequenceid=392, filesize=12.0 K 2024-11-26T10:34:50,530 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/.tmp/C/b4b09c45404c4bdc8a0cdd4fabf58d39 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/b4b09c45404c4bdc8a0cdd4fabf58d39 2024-11-26T10:34:50,533 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/b4b09c45404c4bdc8a0cdd4fabf58d39, entries=150, sequenceid=392, filesize=12.0 K 2024-11-26T10:34:50,534 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 0dac262a6c43b2828c5201e254d47204 in 1281ms, sequenceid=392, compaction requested=true 2024-11-26T10:34:50,535 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/e752a79261844fb1abbe3090f9815526, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/0645dd61fed943efb816b6a299b6a823, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/1b5f5a45933e400985c30dcd14ccdc89, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/e70ce4ded90a44f199ccb855ede9dc94, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/75305929460747cd8c99e5b3b0de0a06, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/0f2bbab550ed47efb91b34fc0d24adad, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/feff1918ff694adfa916cc414f8f22de, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/6843f35503e249f0810f11ca3ea930cf, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/784d5a145ebb48bb9fdf7f25107d2ca7, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/9b546183331b491d98509d8f69616090, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/df061021645b45d28deabd0d0e297123, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/d364621ffe5e42f8a838de0ab5792b3b, 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/bf0b8094912c4c03819a0448ed7f0ea3, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/2ca4ce2cfab3496a9e9c2c59e6730df8, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/9a03bdfc134e410180076bc2ba4c62e1, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/b7943bc23b85470dafee7906d4e80feb, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/21c8e8bab13a445cb542338f15677397, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/5050003094d84a949d7620689440c60b, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/dc7a67674b254e74bfd501832067277e, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/f405c3a6721f45ab9e923a79cddf0e57, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/7376766a62fc469ba2dd08b8bf951f44, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/5a5fb4d46b0949709d066cdfdbed9c12, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/85f1c3e679ea4223bd765449c18d33f8, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/48fb20ffb697459b98f5d78698c2451b, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/3bbe9c9173514c058db4c8b89c34ca41] to archive 2024-11-26T10:34:50,535 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-26T10:34:50,537 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/e752a79261844fb1abbe3090f9815526 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/e752a79261844fb1abbe3090f9815526 2024-11-26T10:34:50,538 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/0645dd61fed943efb816b6a299b6a823 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/0645dd61fed943efb816b6a299b6a823 2024-11-26T10:34:50,539 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/1b5f5a45933e400985c30dcd14ccdc89 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/1b5f5a45933e400985c30dcd14ccdc89 2024-11-26T10:34:50,540 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/e70ce4ded90a44f199ccb855ede9dc94 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/e70ce4ded90a44f199ccb855ede9dc94 2024-11-26T10:34:50,541 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/75305929460747cd8c99e5b3b0de0a06 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/75305929460747cd8c99e5b3b0de0a06 2024-11-26T10:34:50,542 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/0f2bbab550ed47efb91b34fc0d24adad to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/0f2bbab550ed47efb91b34fc0d24adad 2024-11-26T10:34:50,543 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/feff1918ff694adfa916cc414f8f22de to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/feff1918ff694adfa916cc414f8f22de 2024-11-26T10:34:50,543 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/6843f35503e249f0810f11ca3ea930cf to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/6843f35503e249f0810f11ca3ea930cf 2024-11-26T10:34:50,544 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/784d5a145ebb48bb9fdf7f25107d2ca7 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/784d5a145ebb48bb9fdf7f25107d2ca7 2024-11-26T10:34:50,545 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/9b546183331b491d98509d8f69616090 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/9b546183331b491d98509d8f69616090 2024-11-26T10:34:50,546 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/df061021645b45d28deabd0d0e297123 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/df061021645b45d28deabd0d0e297123 2024-11-26T10:34:50,547 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/d364621ffe5e42f8a838de0ab5792b3b to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/d364621ffe5e42f8a838de0ab5792b3b 2024-11-26T10:34:50,547 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/bf0b8094912c4c03819a0448ed7f0ea3 to 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/bf0b8094912c4c03819a0448ed7f0ea3 2024-11-26T10:34:50,548 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/2ca4ce2cfab3496a9e9c2c59e6730df8 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/2ca4ce2cfab3496a9e9c2c59e6730df8 2024-11-26T10:34:50,549 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/9a03bdfc134e410180076bc2ba4c62e1 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/9a03bdfc134e410180076bc2ba4c62e1 2024-11-26T10:34:50,549 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/b7943bc23b85470dafee7906d4e80feb to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/b7943bc23b85470dafee7906d4e80feb 2024-11-26T10:34:50,550 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/21c8e8bab13a445cb542338f15677397 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/21c8e8bab13a445cb542338f15677397 2024-11-26T10:34:50,551 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/5050003094d84a949d7620689440c60b to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/5050003094d84a949d7620689440c60b 2024-11-26T10:34:50,552 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/dc7a67674b254e74bfd501832067277e to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/dc7a67674b254e74bfd501832067277e 2024-11-26T10:34:50,553 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/f405c3a6721f45ab9e923a79cddf0e57 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/f405c3a6721f45ab9e923a79cddf0e57 2024-11-26T10:34:50,554 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/7376766a62fc469ba2dd08b8bf951f44 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/7376766a62fc469ba2dd08b8bf951f44 2024-11-26T10:34:50,555 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/5a5fb4d46b0949709d066cdfdbed9c12 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/5a5fb4d46b0949709d066cdfdbed9c12 2024-11-26T10:34:50,556 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/85f1c3e679ea4223bd765449c18d33f8 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/85f1c3e679ea4223bd765449c18d33f8 2024-11-26T10:34:50,556 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/48fb20ffb697459b98f5d78698c2451b to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/48fb20ffb697459b98f5d78698c2451b 2024-11-26T10:34:50,557 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/3bbe9c9173514c058db4c8b89c34ca41 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/3bbe9c9173514c058db4c8b89c34ca41 2024-11-26T10:34:50,558 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/b02b792efc90412ca2eec5701f42908e, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/c5e45555c5084477aff4b95d0f9386dd, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/c87320d8514b48acac59a3a327719906, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/97a6a910a5454523996bcfdfffc8df28, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/4b1a36e8168a4d09925b765b68577969, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/2b10ef81bd454e02ac96197d8bbcfb02, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/ae56c432499e4456852d0248fbd22c04, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/3eed9cd449e7466b8abe3702e77214ad, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/1c202d2444584c0ba743cdf3291b459a, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/a7f9e985d5d94ac4ad5b680e3403505e, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/351c9f4d74274f7daf4565ae20012cc4, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/9c7c7297d9a94228b096ce05eac9151a, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/210e6eee7b744721a736b2e39b62580b, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/23a7a7b8832a4051ab73202614ce2d0c, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/867c2f869c6740fca7eb55b227e82ed6, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/74436f3864304282bcf969f804c6c316, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/e40a9bc83423451da77a8abe3f17b49b, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/327e96d40a5e45f3a382e3f39f010374, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/a245fa5796824432bcce7bdafc857c3a, 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/7927b98a844d4db3b42125154f1156c8, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/ede7f0fb49ef4ebcabe280ae07ce8d2c, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/33b5ea29da7c42cab020bfff07966751, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/1b4812e0862f4dcbb718d19aca3dcd96, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/6c7230a608434a70aec2aeee490d07fc, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/932e7290d77d49c5a2867107c00a38d3] to archive 2024-11-26T10:34:50,559 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-26T10:34:50,560 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/b02b792efc90412ca2eec5701f42908e to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/b02b792efc90412ca2eec5701f42908e 2024-11-26T10:34:50,561 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/c5e45555c5084477aff4b95d0f9386dd to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/c5e45555c5084477aff4b95d0f9386dd 2024-11-26T10:34:50,561 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/c87320d8514b48acac59a3a327719906 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/c87320d8514b48acac59a3a327719906 2024-11-26T10:34:50,562 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/97a6a910a5454523996bcfdfffc8df28 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/97a6a910a5454523996bcfdfffc8df28 2024-11-26T10:34:50,563 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/4b1a36e8168a4d09925b765b68577969 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/4b1a36e8168a4d09925b765b68577969 2024-11-26T10:34:50,564 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/2b10ef81bd454e02ac96197d8bbcfb02 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/2b10ef81bd454e02ac96197d8bbcfb02 2024-11-26T10:34:50,564 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/ae56c432499e4456852d0248fbd22c04 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/ae56c432499e4456852d0248fbd22c04 2024-11-26T10:34:50,565 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/3eed9cd449e7466b8abe3702e77214ad to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/3eed9cd449e7466b8abe3702e77214ad 2024-11-26T10:34:50,566 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/1c202d2444584c0ba743cdf3291b459a to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/1c202d2444584c0ba743cdf3291b459a 2024-11-26T10:34:50,567 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/a7f9e985d5d94ac4ad5b680e3403505e to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/a7f9e985d5d94ac4ad5b680e3403505e 2024-11-26T10:34:50,567 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/351c9f4d74274f7daf4565ae20012cc4 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/351c9f4d74274f7daf4565ae20012cc4 2024-11-26T10:34:50,568 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/9c7c7297d9a94228b096ce05eac9151a to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/9c7c7297d9a94228b096ce05eac9151a 2024-11-26T10:34:50,569 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/210e6eee7b744721a736b2e39b62580b to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/210e6eee7b744721a736b2e39b62580b 2024-11-26T10:34:50,570 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/23a7a7b8832a4051ab73202614ce2d0c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/23a7a7b8832a4051ab73202614ce2d0c 2024-11-26T10:34:50,571 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/867c2f869c6740fca7eb55b227e82ed6 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/867c2f869c6740fca7eb55b227e82ed6 2024-11-26T10:34:50,571 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/74436f3864304282bcf969f804c6c316 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/74436f3864304282bcf969f804c6c316 2024-11-26T10:34:50,572 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/e40a9bc83423451da77a8abe3f17b49b to 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/e40a9bc83423451da77a8abe3f17b49b 2024-11-26T10:34:50,573 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/327e96d40a5e45f3a382e3f39f010374 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/327e96d40a5e45f3a382e3f39f010374 2024-11-26T10:34:50,573 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/a245fa5796824432bcce7bdafc857c3a to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/a245fa5796824432bcce7bdafc857c3a 2024-11-26T10:34:50,574 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/7927b98a844d4db3b42125154f1156c8 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/7927b98a844d4db3b42125154f1156c8 2024-11-26T10:34:50,575 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/ede7f0fb49ef4ebcabe280ae07ce8d2c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/ede7f0fb49ef4ebcabe280ae07ce8d2c 2024-11-26T10:34:50,576 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/33b5ea29da7c42cab020bfff07966751 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/33b5ea29da7c42cab020bfff07966751 2024-11-26T10:34:50,576 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/1b4812e0862f4dcbb718d19aca3dcd96 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/1b4812e0862f4dcbb718d19aca3dcd96 2024-11-26T10:34:50,577 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/6c7230a608434a70aec2aeee490d07fc to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/6c7230a608434a70aec2aeee490d07fc 2024-11-26T10:34:50,578 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/932e7290d77d49c5a2867107c00a38d3 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/932e7290d77d49c5a2867107c00a38d3 2024-11-26T10:34:50,579 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/73b6d73b09624b9ca37be1f2109e233c, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/25d8ec7aaa564c8fa87ece51631a5251, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/979ee34cf5f64c37a4c14a5d2f99253a, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/a94015a34c7344d58e655d3b0f745be6, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/eb3e7aaa7ab0455c85cd25dac8763227, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/0359b2a6214a4ec598d86ab425a95ef7, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/2d9c7f26d6ff4abab1fea3b05e739e88, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/2c728f3209a64be897ffef1a7e29bc85, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/d06bbcf4acdf4cf99db391839fa33003, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/0fe6dcb9fea04cb2833dfb628ca5d7dd, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/d604a6b4e723415fa64720ed7cab4e0e, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/1417dd1ebcdc4283b3db5f759ca3a9e5, 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/c28abcd0c6de476f9f64f5bd7e15c21a, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/297cc4d25a5a4453900eea3faf8d1f46, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/efe3b3dc5a4e4c8591b1675c90db6a49, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/ea788f6a0ce04615b5e38276e2a85b20, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/0f1f82603b2a45c4bee99194a752e836, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/0401d07b2f0e4e37913aa52104fe6f27, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/4641a4d374af423da2353d9f5708f71b, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/790431b298b1400aaed2913700b63559, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/14ec826a584347a69c1ff508ad385ee0, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/1cd35fe98da841ff9cb8970a6908a51e, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/7e0db07cd28c413b8dee2e5cc10f93ab, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/dc79f16df7334dffa2a6e59900ed31fa, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/3a3d5779179249ca80800ca62f17bcd3] to archive 2024-11-26T10:34:50,579 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-26T10:34:50,580 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/73b6d73b09624b9ca37be1f2109e233c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/73b6d73b09624b9ca37be1f2109e233c 2024-11-26T10:34:50,581 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/25d8ec7aaa564c8fa87ece51631a5251 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/25d8ec7aaa564c8fa87ece51631a5251 2024-11-26T10:34:50,582 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/979ee34cf5f64c37a4c14a5d2f99253a to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/979ee34cf5f64c37a4c14a5d2f99253a 2024-11-26T10:34:50,582 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/a94015a34c7344d58e655d3b0f745be6 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/a94015a34c7344d58e655d3b0f745be6 2024-11-26T10:34:50,583 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/eb3e7aaa7ab0455c85cd25dac8763227 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/eb3e7aaa7ab0455c85cd25dac8763227 2024-11-26T10:34:50,584 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/0359b2a6214a4ec598d86ab425a95ef7 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/0359b2a6214a4ec598d86ab425a95ef7 2024-11-26T10:34:50,585 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/2d9c7f26d6ff4abab1fea3b05e739e88 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/2d9c7f26d6ff4abab1fea3b05e739e88 2024-11-26T10:34:50,585 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/2c728f3209a64be897ffef1a7e29bc85 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/2c728f3209a64be897ffef1a7e29bc85 2024-11-26T10:34:50,586 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/d06bbcf4acdf4cf99db391839fa33003 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/d06bbcf4acdf4cf99db391839fa33003 2024-11-26T10:34:50,587 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/0fe6dcb9fea04cb2833dfb628ca5d7dd to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/0fe6dcb9fea04cb2833dfb628ca5d7dd 2024-11-26T10:34:50,587 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/d604a6b4e723415fa64720ed7cab4e0e to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/d604a6b4e723415fa64720ed7cab4e0e 2024-11-26T10:34:50,588 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/1417dd1ebcdc4283b3db5f759ca3a9e5 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/1417dd1ebcdc4283b3db5f759ca3a9e5 2024-11-26T10:34:50,589 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/c28abcd0c6de476f9f64f5bd7e15c21a to 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/c28abcd0c6de476f9f64f5bd7e15c21a 2024-11-26T10:34:50,589 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/297cc4d25a5a4453900eea3faf8d1f46 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/297cc4d25a5a4453900eea3faf8d1f46 2024-11-26T10:34:50,590 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/efe3b3dc5a4e4c8591b1675c90db6a49 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/efe3b3dc5a4e4c8591b1675c90db6a49 2024-11-26T10:34:50,591 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/ea788f6a0ce04615b5e38276e2a85b20 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/ea788f6a0ce04615b5e38276e2a85b20 2024-11-26T10:34:50,592 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/0f1f82603b2a45c4bee99194a752e836 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/0f1f82603b2a45c4bee99194a752e836 2024-11-26T10:34:50,592 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/0401d07b2f0e4e37913aa52104fe6f27 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/0401d07b2f0e4e37913aa52104fe6f27 2024-11-26T10:34:50,593 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/4641a4d374af423da2353d9f5708f71b to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/4641a4d374af423da2353d9f5708f71b 2024-11-26T10:34:50,594 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/790431b298b1400aaed2913700b63559 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/790431b298b1400aaed2913700b63559 2024-11-26T10:34:50,595 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/14ec826a584347a69c1ff508ad385ee0 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/14ec826a584347a69c1ff508ad385ee0 2024-11-26T10:34:50,595 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/1cd35fe98da841ff9cb8970a6908a51e to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/1cd35fe98da841ff9cb8970a6908a51e 2024-11-26T10:34:50,596 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/7e0db07cd28c413b8dee2e5cc10f93ab to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/7e0db07cd28c413b8dee2e5cc10f93ab 2024-11-26T10:34:50,597 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/dc79f16df7334dffa2a6e59900ed31fa to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/dc79f16df7334dffa2a6e59900ed31fa 2024-11-26T10:34:50,597 DEBUG [StoreCloser-TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/3a3d5779179249ca80800ca62f17bcd3 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/3a3d5779179249ca80800ca62f17bcd3 2024-11-26T10:34:50,601 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] wal.WALSplitUtil(409): Wrote 
file=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/recovered.edits/395.seqid, newMaxSeqId=395, maxSeqId=1 2024-11-26T10:34:50,602 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204. 2024-11-26T10:34:50,602 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1635): Region close journal for 0dac262a6c43b2828c5201e254d47204: 2024-11-26T10:34:50,603 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(170): Closed 0dac262a6c43b2828c5201e254d47204 2024-11-26T10:34:50,604 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=93 updating hbase:meta row=0dac262a6c43b2828c5201e254d47204, regionState=CLOSED 2024-11-26T10:34:50,606 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=94, resume processing ppid=93 2024-11-26T10:34:50,606 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, ppid=93, state=SUCCESS; CloseRegionProcedure 0dac262a6c43b2828c5201e254d47204, server=ccf62758a0a5,45419,1732617185877 in 1.5060 sec 2024-11-26T10:34:50,607 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=93, resume processing ppid=92 2024-11-26T10:34:50,607 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, ppid=92, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=0dac262a6c43b2828c5201e254d47204, UNASSIGN in 1.5100 sec 2024-11-26T10:34:50,608 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=92, resume processing ppid=91 2024-11-26T10:34:50,608 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, ppid=91, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.5120 sec 2024-11-26T10:34:50,609 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732617290609"}]},"ts":"1732617290609"} 2024-11-26T10:34:50,609 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-26T10:34:50,651 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-26T10:34:50,653 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.6090 sec 2024-11-26T10:34:51,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-26T10:34:51,155 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 91 completed 2024-11-26T10:34:51,157 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-26T10:34:51,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=95, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-26T10:34:51,160 DEBUG [PEWorker-2 {}] 
procedure.DeleteTableProcedure(103): Waiting for RIT for pid=95, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-26T10:34:51,161 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=95, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-26T10:34:51,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-11-26T10:34:51,163 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204 2024-11-26T10:34:51,166 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A, FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B, FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C, FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/recovered.edits] 2024-11-26T10:34:51,170 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/5e6131fb91a34e5c8a56122f0db4dce5 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/5e6131fb91a34e5c8a56122f0db4dce5 2024-11-26T10:34:51,171 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/8ecd631d4d7d4438bd96997d7eb0d157 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/8ecd631d4d7d4438bd96997d7eb0d157 2024-11-26T10:34:51,173 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/a221dba4f1c742b68cc0aca857738d9f to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/a221dba4f1c742b68cc0aca857738d9f 2024-11-26T10:34:51,174 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/c6f24f2994744053a85a2039b34be9b3 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/A/c6f24f2994744053a85a2039b34be9b3 2024-11-26T10:34:51,176 DEBUG [HFileArchiver-3 {}] 
backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/1f99809dabd047888e61e739b4b177c7 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/1f99809dabd047888e61e739b4b177c7 2024-11-26T10:34:51,177 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/a13513b97f56449f918bab7b4d076ec5 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/a13513b97f56449f918bab7b4d076ec5 2024-11-26T10:34:51,178 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/c8f40e4fa4414fc09c7b420038c2cf58 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/c8f40e4fa4414fc09c7b420038c2cf58 2024-11-26T10:34:51,179 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/dbaeeb9c1c9b480c8a3650e91b9a47c6 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/B/dbaeeb9c1c9b480c8a3650e91b9a47c6 2024-11-26T10:34:51,180 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/0366dc07ce8b479b8d26eb95992bae82 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/0366dc07ce8b479b8d26eb95992bae82 2024-11-26T10:34:51,181 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/0c7aa6913db9483d80104d3af74de592 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/0c7aa6913db9483d80104d3af74de592 2024-11-26T10:34:51,183 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/b4b09c45404c4bdc8a0cdd4fabf58d39 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/b4b09c45404c4bdc8a0cdd4fabf58d39 2024-11-26T10:34:51,184 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/e8d9e2b87d154f4e9f300ba1402c51db to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/C/e8d9e2b87d154f4e9f300ba1402c51db 2024-11-26T10:34:51,187 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/recovered.edits/395.seqid to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204/recovered.edits/395.seqid 2024-11-26T10:34:51,187 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/0dac262a6c43b2828c5201e254d47204 2024-11-26T10:34:51,188 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-26T10:34:51,190 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=95, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-26T10:34:51,192 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-26T10:34:51,193 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-26T10:34:51,194 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=95, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-26T10:34:51,194 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-26T10:34:51,194 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732617291194"}]},"ts":"9223372036854775807"} 2024-11-26T10:34:51,195 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-26T10:34:51,195 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 0dac262a6c43b2828c5201e254d47204, NAME => 'TestAcidGuarantees,,1732617263916.0dac262a6c43b2828c5201e254d47204.', STARTKEY => '', ENDKEY => ''}] 2024-11-26T10:34:51,195 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
2024-11-26T10:34:51,195 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732617291195"}]},"ts":"9223372036854775807"} 2024-11-26T10:34:51,197 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-26T10:34:51,243 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=95, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-26T10:34:51,245 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 85 msec 2024-11-26T10:34:51,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-11-26T10:34:51,262 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 95 completed 2024-11-26T10:34:51,275 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=239 (was 237) - Thread LEAK? -, OpenFileDescriptor=449 (was 449), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=317 (was 331), ProcessCount=11 (was 11), AvailableMemoryMB=5288 (was 5308) 2024-11-26T10:34:51,284 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=239, OpenFileDescriptor=449, MaxFileDescriptor=1048576, SystemLoadAverage=317, ProcessCount=11, AvailableMemoryMB=5288 2024-11-26T10:34:51,285 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-11-26T10:34:51,286 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-26T10:34:51,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=96, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-26T10:34:51,288 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-26T10:34:51,288 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:34:51,289 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-26T10:34:51,289 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 96 2024-11-26T10:34:51,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-11-26T10:34:51,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742158_1334 (size=963) 2024-11-26T10:34:51,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-11-26T10:34:51,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-11-26T10:34:51,701 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1 2024-11-26T10:34:51,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742159_1335 (size=53) 2024-11-26T10:34:51,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-11-26T10:34:52,111 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:34:52,111 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 061adf5fb3bc2e9358b6d3d5a6c93c59, disabling compactions & flushes 2024-11-26T10:34:52,111 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:34:52,111 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:34:52,112 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. after waiting 1 ms 2024-11-26T10:34:52,112 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:34:52,112 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 
2024-11-26T10:34:52,112 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 061adf5fb3bc2e9358b6d3d5a6c93c59: 2024-11-26T10:34:52,114 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-26T10:34:52,115 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732617292114"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732617292114"}]},"ts":"1732617292114"} 2024-11-26T10:34:52,118 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-26T10:34:52,119 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-26T10:34:52,120 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732617292119"}]},"ts":"1732617292119"} 2024-11-26T10:34:52,121 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-26T10:34:52,168 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=061adf5fb3bc2e9358b6d3d5a6c93c59, ASSIGN}] 2024-11-26T10:34:52,169 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=061adf5fb3bc2e9358b6d3d5a6c93c59, ASSIGN 2024-11-26T10:34:52,170 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=061adf5fb3bc2e9358b6d3d5a6c93c59, ASSIGN; state=OFFLINE, location=ccf62758a0a5,45419,1732617185877; forceNewPlan=false, retain=false 2024-11-26T10:34:52,320 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=97 updating hbase:meta row=061adf5fb3bc2e9358b6d3d5a6c93c59, regionState=OPENING, regionLocation=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:52,322 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=98, ppid=97, state=RUNNABLE; OpenRegionProcedure 061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877}] 2024-11-26T10:34:52,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-11-26T10:34:52,474 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:52,480 INFO [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 
2024-11-26T10:34:52,480 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(7285): Opening region: {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} 2024-11-26T10:34:52,480 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:34:52,480 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:34:52,481 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(7327): checking encryption for 061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:34:52,481 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(7330): checking classloading for 061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:34:52,482 INFO [StoreOpener-061adf5fb3bc2e9358b6d3d5a6c93c59-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:34:52,484 INFO [StoreOpener-061adf5fb3bc2e9358b6d3d5a6c93c59-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-26T10:34:52,484 INFO [StoreOpener-061adf5fb3bc2e9358b6d3d5a6c93c59-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 061adf5fb3bc2e9358b6d3d5a6c93c59 columnFamilyName A 2024-11-26T10:34:52,484 DEBUG [StoreOpener-061adf5fb3bc2e9358b6d3d5a6c93c59-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:34:52,485 INFO [StoreOpener-061adf5fb3bc2e9358b6d3d5a6c93c59-1 {}] regionserver.HStore(327): Store=061adf5fb3bc2e9358b6d3d5a6c93c59/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:34:52,485 INFO [StoreOpener-061adf5fb3bc2e9358b6d3d5a6c93c59-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:34:52,487 INFO [StoreOpener-061adf5fb3bc2e9358b6d3d5a6c93c59-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-26T10:34:52,487 INFO [StoreOpener-061adf5fb3bc2e9358b6d3d5a6c93c59-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 061adf5fb3bc2e9358b6d3d5a6c93c59 columnFamilyName B 2024-11-26T10:34:52,487 DEBUG [StoreOpener-061adf5fb3bc2e9358b6d3d5a6c93c59-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:34:52,488 INFO [StoreOpener-061adf5fb3bc2e9358b6d3d5a6c93c59-1 {}] regionserver.HStore(327): Store=061adf5fb3bc2e9358b6d3d5a6c93c59/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:34:52,488 INFO [StoreOpener-061adf5fb3bc2e9358b6d3d5a6c93c59-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:34:52,489 INFO [StoreOpener-061adf5fb3bc2e9358b6d3d5a6c93c59-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-26T10:34:52,489 INFO [StoreOpener-061adf5fb3bc2e9358b6d3d5a6c93c59-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 061adf5fb3bc2e9358b6d3d5a6c93c59 columnFamilyName C 2024-11-26T10:34:52,490 DEBUG [StoreOpener-061adf5fb3bc2e9358b6d3d5a6c93c59-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:34:52,490 INFO [StoreOpener-061adf5fb3bc2e9358b6d3d5a6c93c59-1 {}] regionserver.HStore(327): Store=061adf5fb3bc2e9358b6d3d5a6c93c59/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:34:52,490 INFO [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:34:52,492 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:34:52,492 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:34:52,495 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-26T10:34:52,497 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1085): writing seq id for 061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:34:52,499 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-26T10:34:52,499 INFO [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1102): Opened 061adf5fb3bc2e9358b6d3d5a6c93c59; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73065370, jitterRate=0.08875885605812073}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-26T10:34:52,500 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1001): Region open journal for 061adf5fb3bc2e9358b6d3d5a6c93c59: 2024-11-26T10:34:52,501 INFO [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59., pid=98, masterSystemTime=1732617292473 2024-11-26T10:34:52,502 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:34:52,502 INFO [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 
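The store-open lines above show each of the three families backed by a CompactingMemStore with an ADAPTIVE in-memory compactor, which corresponds to the table attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' visible in the descriptor logged further down. A short sketch of how a descriptor can request this; the table-level attribute mirrors what appears in the log, and the per-family setter is shown only as an alternative form:

```java
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class AdaptiveMemstoreSketch {
    static TableDescriptor adaptiveTable() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
            // Table-level switch, matching the attribute in the logged descriptor.
            .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE")
            // The same policy can also be requested per column family.
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
                .build())
            .build();
    }
}
```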
2024-11-26T10:34:52,502 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=97 updating hbase:meta row=061adf5fb3bc2e9358b6d3d5a6c93c59, regionState=OPEN, openSeqNum=2, regionLocation=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:52,505 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=98, resume processing ppid=97 2024-11-26T10:34:52,505 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, ppid=97, state=SUCCESS; OpenRegionProcedure 061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 in 181 msec 2024-11-26T10:34:52,506 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=97, resume processing ppid=96 2024-11-26T10:34:52,507 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, ppid=96, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=061adf5fb3bc2e9358b6d3d5a6c93c59, ASSIGN in 337 msec 2024-11-26T10:34:52,507 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-26T10:34:52,507 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732617292507"}]},"ts":"1732617292507"} 2024-11-26T10:34:52,508 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-26T10:34:52,551 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-26T10:34:52,553 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2660 sec 2024-11-26T10:34:53,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-11-26T10:34:53,400 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 96 completed 2024-11-26T10:34:53,403 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2b976e1a to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1df61dc9 2024-11-26T10:34:53,436 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5fe71801, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:34:53,437 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:34:53,438 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51300, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:34:53,439 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-26T10:34:53,440 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45048, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-26T10:34:53,441 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-26T10:34:53,441 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-26T10:34:53,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=99, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-26T10:34:53,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742160_1336 (size=999) 2024-11-26T10:34:53,852 DEBUG [PEWorker-1 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-11-26T10:34:53,852 INFO [PEWorker-1 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-11-26T10:34:53,855 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=100, ppid=99, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-26T10:34:53,857 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=101, ppid=100, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=061adf5fb3bc2e9358b6d3d5a6c93c59, REOPEN/MOVE}] 2024-11-26T10:34:53,858 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=101, ppid=100, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=061adf5fb3bc2e9358b6d3d5a6c93c59, REOPEN/MOVE 2024-11-26T10:34:53,859 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=061adf5fb3bc2e9358b6d3d5a6c93c59, regionState=CLOSING, regionLocation=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:53,860 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-26T10:34:53,860 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=102, ppid=101, state=RUNNABLE; CloseRegionProcedure 061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877}] 2024-11-26T10:34:54,012 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:54,013 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] handler.UnassignRegionHandler(124): Close 061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:34:54,014 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-26T10:34:54,014 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1681): Closing 061adf5fb3bc2e9358b6d3d5a6c93c59, disabling compactions & flushes 2024-11-26T10:34:54,014 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:34:54,014 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:34:54,014 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. after waiting 0 ms 2024-11-26T10:34:54,014 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 
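The modify-table request logged above (pid=99) rewrites the descriptor so that family A becomes a MOB column family (IS_MOB => 'true', MOB_THRESHOLD => '4'), and the ReopenTableRegionsProcedure that follows closes and reopens the region to apply it. A hedged sketch of the corresponding admin call; the family name and threshold come from the log, everything else is illustrative:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class EnableMobSketch {
    public static void main(String[] args) throws Exception {
        TableName name = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            TableDescriptor current = admin.getDescriptor(name);
            // Rebuild family 'A' as a MOB family: values larger than 4 bytes go to MOB files.
            TableDescriptor modified = TableDescriptorBuilder.newBuilder(current)
                .modifyColumnFamily(ColumnFamilyDescriptorBuilder
                    .newBuilder(current.getColumnFamily(Bytes.toBytes("A")))
                    .setMobEnabled(true)
                    .setMobThreshold(4L)
                    .build())
                .build();
            // Triggers a ModifyTableProcedure plus a ReopenTableRegionsProcedure, as seen in this log.
            admin.modifyTable(modified);
        }
    }
}
```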
2024-11-26T10:34:54,023 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-26T10:34:54,024 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:34:54,024 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1635): Region close journal for 061adf5fb3bc2e9358b6d3d5a6c93c59: 2024-11-26T10:34:54,024 WARN [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegionServer(3786): Not adding moved region record: 061adf5fb3bc2e9358b6d3d5a6c93c59 to self. 2024-11-26T10:34:54,025 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] handler.UnassignRegionHandler(170): Closed 061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:34:54,026 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=061adf5fb3bc2e9358b6d3d5a6c93c59, regionState=CLOSED 2024-11-26T10:34:54,028 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=102, resume processing ppid=101 2024-11-26T10:34:54,028 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, ppid=101, state=SUCCESS; CloseRegionProcedure 061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 in 167 msec 2024-11-26T10:34:54,029 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=101, ppid=100, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=061adf5fb3bc2e9358b6d3d5a6c93c59, REOPEN/MOVE; state=CLOSED, location=ccf62758a0a5,45419,1732617185877; forceNewPlan=false, retain=true 2024-11-26T10:34:54,180 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=061adf5fb3bc2e9358b6d3d5a6c93c59, regionState=OPENING, regionLocation=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:54,203 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=103, ppid=101, state=RUNNABLE; OpenRegionProcedure 061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877}] 2024-11-26T10:34:54,357 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:54,364 INFO [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 
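Throughout this sequence the client polls the master ("Checking to see if procedure is done pid=...") and the blocking HBaseAdmin calls resolve a TableFuture once the procedure finishes. A sketch of the asynchronous form of the same pattern; the timeout value is an assumption:

```java
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.TableDescriptor;

public class ProcedureWaitSketch {
    // The blocking admin calls wrap an asynchronous submit plus the polling visible in
    // this log; modifyTableAsync exposes the same master procedure as a Future.
    static void modifyAndWait(Admin admin, TableDescriptor modified) throws Exception {
        Future<Void> done = admin.modifyTableAsync(modified);
        done.get(5, TimeUnit.MINUTES);   // client-side wait; this timeout is arbitrary
    }
}
```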
2024-11-26T10:34:54,364 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7285): Opening region: {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} 2024-11-26T10:34:54,366 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:34:54,366 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:34:54,366 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7327): checking encryption for 061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:34:54,367 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7330): checking classloading for 061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:34:54,370 INFO [StoreOpener-061adf5fb3bc2e9358b6d3d5a6c93c59-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:34:54,372 INFO [StoreOpener-061adf5fb3bc2e9358b6d3d5a6c93c59-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-26T10:34:54,372 INFO [StoreOpener-061adf5fb3bc2e9358b6d3d5a6c93c59-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 061adf5fb3bc2e9358b6d3d5a6c93c59 columnFamilyName A 2024-11-26T10:34:54,374 DEBUG [StoreOpener-061adf5fb3bc2e9358b6d3d5a6c93c59-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:34:54,374 INFO [StoreOpener-061adf5fb3bc2e9358b6d3d5a6c93c59-1 {}] regionserver.HStore(327): Store=061adf5fb3bc2e9358b6d3d5a6c93c59/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:34:54,375 INFO [StoreOpener-061adf5fb3bc2e9358b6d3d5a6c93c59-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:34:54,376 INFO [StoreOpener-061adf5fb3bc2e9358b6d3d5a6c93c59-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-26T10:34:54,376 INFO [StoreOpener-061adf5fb3bc2e9358b6d3d5a6c93c59-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 061adf5fb3bc2e9358b6d3d5a6c93c59 columnFamilyName B 2024-11-26T10:34:54,376 DEBUG [StoreOpener-061adf5fb3bc2e9358b6d3d5a6c93c59-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:34:54,376 INFO [StoreOpener-061adf5fb3bc2e9358b6d3d5a6c93c59-1 {}] regionserver.HStore(327): Store=061adf5fb3bc2e9358b6d3d5a6c93c59/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:34:54,376 INFO [StoreOpener-061adf5fb3bc2e9358b6d3d5a6c93c59-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:34:54,377 INFO [StoreOpener-061adf5fb3bc2e9358b6d3d5a6c93c59-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-26T10:34:54,377 INFO [StoreOpener-061adf5fb3bc2e9358b6d3d5a6c93c59-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 061adf5fb3bc2e9358b6d3d5a6c93c59 columnFamilyName C 2024-11-26T10:34:54,377 DEBUG [StoreOpener-061adf5fb3bc2e9358b6d3d5a6c93c59-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:34:54,378 INFO [StoreOpener-061adf5fb3bc2e9358b6d3d5a6c93c59-1 {}] regionserver.HStore(327): Store=061adf5fb3bc2e9358b6d3d5a6c93c59/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:34:54,378 INFO [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:34:54,378 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:34:54,380 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:34:54,381 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-26T10:34:54,382 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1085): writing seq id for 061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:34:54,383 INFO [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1102): Opened 061adf5fb3bc2e9358b6d3d5a6c93c59; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60818438, jitterRate=-0.09373465180397034}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-26T10:34:54,384 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1001): Region open journal for 061adf5fb3bc2e9358b6d3d5a6c93c59: 2024-11-26T10:34:54,385 INFO [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59., pid=103, masterSystemTime=1732617294357 2024-11-26T10:34:54,386 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:34:54,387 INFO [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 
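The entries that follow show the test requesting a flush of TestAcidGuarantees (FlushTableProcedure, pid=104) while concurrent writers are rejected with RegionTooBusyException once the region exceeds its memstore blocking limit of 512.0 K, consistent with the deliberately small 131072-byte flush size warned about earlier multiplied by the default block multiplier of 4. A minimal, hypothetical client-side sketch of issuing the flush and retrying a put when the region reports itself too busy; the row and column echo the key logged by the flusher (test_row_0/A:col10), while the value and retry policy are assumptions, not the test's actual logic:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushAndRetrySketch {
    public static void main(String[] args) throws Exception {
        TableName name = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin();
             Table table = conn.getTable(name)) {
            admin.flush(name);   // flushes all stores of the table, as the FlushTableProcedure does here

            Put put = new Put(Bytes.toBytes("test_row_0"))
                .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            for (int attempt = 0; ; attempt++) {
                try {
                    table.put(put);
                    break;
                } catch (IOException e) {
                    // Depending on client retry settings, RegionTooBusyException may surface
                    // directly or as the cause of a retries-exhausted exception.
                    boolean tooBusy = e instanceof RegionTooBusyException
                        || e.getCause() instanceof RegionTooBusyException;
                    if (!tooBusy || attempt >= 5) throw e;
                    Thread.sleep(100L * (attempt + 1));   // simple linear backoff
                }
            }
        }
    }
}
```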
2024-11-26T10:34:54,387 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=061adf5fb3bc2e9358b6d3d5a6c93c59, regionState=OPEN, openSeqNum=5, regionLocation=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:54,389 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=103, resume processing ppid=101 2024-11-26T10:34:54,389 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, ppid=101, state=SUCCESS; OpenRegionProcedure 061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 in 185 msec 2024-11-26T10:34:54,391 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=101, resume processing ppid=100 2024-11-26T10:34:54,391 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, ppid=100, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=061adf5fb3bc2e9358b6d3d5a6c93c59, REOPEN/MOVE in 532 msec 2024-11-26T10:34:54,393 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=100, resume processing ppid=99 2024-11-26T10:34:54,393 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, ppid=99, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 537 msec 2024-11-26T10:34:54,395 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 952 msec 2024-11-26T10:34:54,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-11-26T10:34:54,399 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1b82ba2a to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3637e4c6 2024-11-26T10:34:54,441 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@51f7d511, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:34:54,443 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7b6cf8cb to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@72f422b4 2024-11-26T10:34:54,461 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1dc42ea6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:34:54,461 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7ec15031 to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2df33cdf 2024-11-26T10:34:54,469 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@117e86d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:34:54,470 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3dd5b441 to 
127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@9f472e0 2024-11-26T10:34:54,477 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6cd96549, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:34:54,478 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3c336ea4 to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@167a78b0 2024-11-26T10:34:54,486 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@31aea41b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:34:54,486 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5aee939b to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1e247aa1 2024-11-26T10:34:54,494 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@801ba40, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:34:54,495 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1f49665c to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2205f666 2024-11-26T10:34:54,502 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27539bdc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:34:54,504 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x683f8469 to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6584e9ce 2024-11-26T10:34:54,511 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e3203d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:34:54,512 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x75e4d3d0 to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@37ec8e3b 2024-11-26T10:34:54,519 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@798e7fd4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:34:54,520 DEBUG [Time-limited test {}] 
zookeeper.ReadOnlyZKClient(149): Connect 0x2b308f62 to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@787e5169 2024-11-26T10:34:54,528 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7284f16d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:34:54,533 DEBUG [hconnection-0x4c87fe51-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:34:54,533 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-26T10:34:54,533 DEBUG [hconnection-0x45b283ed-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:34:54,534 DEBUG [hconnection-0x75885f97-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:34:54,534 DEBUG [hconnection-0x9f3985b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:34:54,535 DEBUG [hconnection-0x1747789-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:34:54,535 DEBUG [hconnection-0x31bef7b7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:34:54,535 DEBUG [hconnection-0x12270ebb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:34:54,535 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51312, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:34:54,535 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51338, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:34:54,535 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51346, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:34:54,536 DEBUG [hconnection-0xba68477-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:34:54,536 DEBUG [hconnection-0x67073526-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:34:54,536 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51360, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:34:54,536 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51374, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:34:54,536 DEBUG [hconnection-0x1ca273be-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:34:54,536 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees 2024-11-26T10:34:54,536 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51326, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:34:54,536 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51358, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:34:54,536 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51388, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:34:54,537 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51382, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:34:54,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-26T10:34:54,537 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-26T10:34:54,537 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51398, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:34:54,538 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-26T10:34:54,538 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-26T10:34:54,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:34:54,545 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 061adf5fb3bc2e9358b6d3d5a6c93c59 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-26T10:34:54,546 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 061adf5fb3bc2e9358b6d3d5a6c93c59, store=A 2024-11-26T10:34:54,546 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:54,546 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 061adf5fb3bc2e9358b6d3d5a6c93c59, store=B 2024-11-26T10:34:54,546 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:54,546 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 061adf5fb3bc2e9358b6d3d5a6c93c59, store=C 2024-11-26T10:34:54,546 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:54,563 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region 
is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:54,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1732617354561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:54,569 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:54,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51346 deadline: 1732617354563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:54,569 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:54,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51382 deadline: 1732617354563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:54,569 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:54,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51326 deadline: 1732617354564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:54,569 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:54,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51374 deadline: 1732617354564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:54,581 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126ba8401bfc265401cb755faca097cf437_061adf5fb3bc2e9358b6d3d5a6c93c59 is 50, key is test_row_0/A:col10/1732617294544/Put/seqid=0 2024-11-26T10:34:54,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742161_1337 (size=12154) 2024-11-26T10:34:54,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-26T10:34:54,665 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:54,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1732617354664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:54,673 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:54,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51382 deadline: 1732617354670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:54,673 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:54,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51326 deadline: 1732617354670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:54,673 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:54,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51374 deadline: 1732617354670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:54,673 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:54,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51346 deadline: 1732617354670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:54,689 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:54,689 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-26T10:34:54,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:34:54,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. as already flushing 2024-11-26T10:34:54,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:34:54,690 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:54,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:54,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:54,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-26T10:34:54,841 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:54,842 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-26T10:34:54,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:34:54,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. as already flushing 2024-11-26T10:34:54,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:34:54,842 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:54,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:54,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:54,867 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:54,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1732617354866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:54,876 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:54,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51326 deadline: 1732617354874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:54,876 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:54,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51382 deadline: 1732617354875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:54,878 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:54,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51374 deadline: 1732617354875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:54,878 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:54,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51346 deadline: 1732617354875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:54,988 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:34:54,991 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126ba8401bfc265401cb755faca097cf437_061adf5fb3bc2e9358b6d3d5a6c93c59 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126ba8401bfc265401cb755faca097cf437_061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:34:54,992 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/b62599f6950b463491629d5adb37106d, store: [table=TestAcidGuarantees family=A region=061adf5fb3bc2e9358b6d3d5a6c93c59] 2024-11-26T10:34:54,992 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/b62599f6950b463491629d5adb37106d is 175, key is test_row_0/A:col10/1732617294544/Put/seqid=0 2024-11-26T10:34:54,994 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:54,994 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-26T10:34:54,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:34:54,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. as already flushing 2024-11-26T10:34:54,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:34:54,994 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:54,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
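The RegionTooBusyException responses in this stretch are what each client write sees while the region's memstore is over its 512.0 K blocking limit and the flush has not yet completed. As a rough, hypothetical sketch (not part of this test's code), a caller issuing single Puts could catch the exception and back off before retrying; in practice the HBase client also retries internally and may surface the error wrapped in a retries-exhausted exception.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // Retry a bounded number of times while the region reports that it is
      // over its memstore blocking limit; the back-off values are arbitrary.
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          break;
        } catch (RegionTooBusyException e) {
          if (attempt >= 5) {
            throw e;                    // give up after a few attempts
          }
          Thread.sleep(100L * attempt); // simple linear back-off
        }
      }
    }
  }
}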
2024-11-26T10:34:54,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:34:54,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742162_1338 (size=30955) 2024-11-26T10:34:54,997 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=18, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/b62599f6950b463491629d5adb37106d 2024-11-26T10:34:55,017 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/17092df6d25449c9b3ab168a2aba3686 is 50, key is test_row_0/B:col10/1732617294544/Put/seqid=0 2024-11-26T10:34:55,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742163_1339 (size=12001) 2024-11-26T10:34:55,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-26T10:34:55,146 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:55,146 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-26T10:34:55,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:34:55,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. as already flushing 2024-11-26T10:34:55,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:34:55,147 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
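The 512.0 K blocking limit quoted in the RegionTooBusyException warnings throughout this stretch is the product of the region's memstore flush size and the block multiplier, so the test presumably runs with a deliberately small flush size. A minimal sketch of the relevant settings, with assumed illustrative values rather than the test's actual configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Flush a region's memstore once it reaches this many bytes
    // (128 KB is only an illustrative, test-sized value).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    // New updates are rejected with RegionTooBusyException once the memstore
    // grows past flush.size * block.multiplier (128 KB * 4 = 512 KB here).
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("blocking limit = " + blockingLimit + " bytes");
  }
}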
2024-11-26T10:34:55,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:55,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:55,174 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:55,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1732617355170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:55,181 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:55,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51326 deadline: 1732617355179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:55,182 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:55,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51382 deadline: 1732617355179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:55,183 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:55,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51346 deadline: 1732617355180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:55,184 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:55,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51374 deadline: 1732617355180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:55,298 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:55,299 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-26T10:34:55,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:34:55,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. as already flushing 2024-11-26T10:34:55,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:34:55,299 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
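Procedure pid=105 keeps being re-dispatched because the region server answers every FlushRegionCallable with "already flushing" until MemStoreFlusher.0 finishes, and the master retries until an attempt succeeds. From a client's point of view that whole exchange sits behind a single admin flush request, sketched below under the assumption of a standard client connection (this is not code from the test itself):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the master to flush every region of the table. As the log above
      // shows, the master dispatches FlushRegionCallable to the region servers
      // and keeps retrying until each region reports the flush complete.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}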
2024-11-26T10:34:55,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:55,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:55,421 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/17092df6d25449c9b3ab168a2aba3686 2024-11-26T10:34:55,444 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/0d3987e8aba3495a93b44b58f7e98d05 is 50, key is test_row_0/C:col10/1732617294544/Put/seqid=0 2024-11-26T10:34:55,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742164_1340 (size=12001) 2024-11-26T10:34:55,451 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:55,451 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-26T10:34:55,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:34:55,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. as already flushing 2024-11-26T10:34:55,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:34:55,451 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:34:55,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:55,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:55,603 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:55,603 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-26T10:34:55,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:34:55,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. as already flushing 2024-11-26T10:34:55,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:34:55,604 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:55,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:55,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:55,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-26T10:34:55,677 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:55,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1732617355675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:55,685 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:55,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51326 deadline: 1732617355683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:55,685 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:55,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51382 deadline: 1732617355683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:55,686 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:55,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51346 deadline: 1732617355684, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:55,691 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:55,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51374 deadline: 1732617355688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:55,755 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:55,756 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-26T10:34:55,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 
2024-11-26T10:34:55,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. as already flushing 2024-11-26T10:34:55,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:34:55,756 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:55,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:34:55,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:34:55,848 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/0d3987e8aba3495a93b44b58f7e98d05 2024-11-26T10:34:55,851 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/b62599f6950b463491629d5adb37106d as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/b62599f6950b463491629d5adb37106d 2024-11-26T10:34:55,854 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/b62599f6950b463491629d5adb37106d, entries=150, sequenceid=18, filesize=30.2 K 2024-11-26T10:34:55,855 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/17092df6d25449c9b3ab168a2aba3686 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/17092df6d25449c9b3ab168a2aba3686 2024-11-26T10:34:55,859 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/17092df6d25449c9b3ab168a2aba3686, entries=150, sequenceid=18, filesize=11.7 K 2024-11-26T10:34:55,860 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/0d3987e8aba3495a93b44b58f7e98d05 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/0d3987e8aba3495a93b44b58f7e98d05 2024-11-26T10:34:55,863 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/0d3987e8aba3495a93b44b58f7e98d05, entries=150, sequenceid=18, filesize=11.7 K 2024-11-26T10:34:55,863 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=140.89 KB/144270 for 061adf5fb3bc2e9358b6d3d5a6c93c59 in 1318ms, sequenceid=18, compaction requested=false 2024-11-26T10:34:55,864 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-26T10:34:55,864 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 061adf5fb3bc2e9358b6d3d5a6c93c59: 2024-11-26T10:34:55,908 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:55,908 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-26T10:34:55,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:34:55,908 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2837): Flushing 061adf5fb3bc2e9358b6d3d5a6c93c59 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-26T10:34:55,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 061adf5fb3bc2e9358b6d3d5a6c93c59, store=A 2024-11-26T10:34:55,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:55,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 061adf5fb3bc2e9358b6d3d5a6c93c59, store=B 2024-11-26T10:34:55,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:55,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 061adf5fb3bc2e9358b6d3d5a6c93c59, store=C 2024-11-26T10:34:55,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:55,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126e9ceda037c9840889048905c32bd9307_061adf5fb3bc2e9358b6d3d5a6c93c59 is 50, key is test_row_0/A:col10/1732617294561/Put/seqid=0 2024-11-26T10:34:55,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742165_1341 (size=12154) 2024-11-26T10:34:56,135 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-26T10:34:56,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:34:56,322 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126e9ceda037c9840889048905c32bd9307_061adf5fb3bc2e9358b6d3d5a6c93c59 to 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126e9ceda037c9840889048905c32bd9307_061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:34:56,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/cbd026c361de4f5880e3236435864ae0, store: [table=TestAcidGuarantees family=A region=061adf5fb3bc2e9358b6d3d5a6c93c59] 2024-11-26T10:34:56,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/cbd026c361de4f5880e3236435864ae0 is 175, key is test_row_0/A:col10/1732617294561/Put/seqid=0 2024-11-26T10:34:56,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742166_1342 (size=30955) 2024-11-26T10:34:56,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-26T10:34:56,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:34:56,683 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. as already flushing 2024-11-26T10:34:56,701 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:56,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1732617356694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:56,701 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:56,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51346 deadline: 1732617356695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:56,702 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:56,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51382 deadline: 1732617356696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:56,702 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:56,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51374 deadline: 1732617356697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:56,706 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:56,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51326 deadline: 1732617356701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:56,727 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=42, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/cbd026c361de4f5880e3236435864ae0 2024-11-26T10:34:56,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/5dbbe565ff474de69031891656a36d87 is 50, key is test_row_0/B:col10/1732617294561/Put/seqid=0 2024-11-26T10:34:56,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742167_1343 (size=12001) 2024-11-26T10:34:56,805 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:56,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1732617356802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:56,806 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:56,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51382 deadline: 1732617356803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:56,806 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:56,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51374 deadline: 1732617356803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:56,810 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:56,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51326 deadline: 1732617356807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:57,009 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:57,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1732617357007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:57,009 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:57,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51382 deadline: 1732617357008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:57,009 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:57,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51374 deadline: 1732617357008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:57,013 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:57,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51326 deadline: 1732617357011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:57,137 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/5dbbe565ff474de69031891656a36d87 2024-11-26T10:34:57,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/1a9c2e8ea84542fa8d6718e2a07015f9 is 50, key is test_row_0/C:col10/1732617294561/Put/seqid=0 2024-11-26T10:34:57,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742168_1344 (size=12001) 2024-11-26T10:34:57,312 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:57,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1732617357311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:57,315 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:57,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51382 deadline: 1732617357312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:57,315 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:57,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51374 deadline: 1732617357312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:57,317 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:57,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51326 deadline: 1732617357314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:57,560 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/1a9c2e8ea84542fa8d6718e2a07015f9 2024-11-26T10:34:57,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/cbd026c361de4f5880e3236435864ae0 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/cbd026c361de4f5880e3236435864ae0 2024-11-26T10:34:57,567 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/cbd026c361de4f5880e3236435864ae0, entries=150, sequenceid=42, filesize=30.2 K 2024-11-26T10:34:57,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/5dbbe565ff474de69031891656a36d87 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/5dbbe565ff474de69031891656a36d87 2024-11-26T10:34:57,571 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/5dbbe565ff474de69031891656a36d87, entries=150, sequenceid=42, filesize=11.7 K 2024-11-26T10:34:57,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/1a9c2e8ea84542fa8d6718e2a07015f9 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/1a9c2e8ea84542fa8d6718e2a07015f9 2024-11-26T10:34:57,577 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/1a9c2e8ea84542fa8d6718e2a07015f9, entries=150, sequenceid=42, filesize=11.7 K 2024-11-26T10:34:57,578 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 061adf5fb3bc2e9358b6d3d5a6c93c59 in 1669ms, sequenceid=42, compaction requested=false 2024-11-26T10:34:57,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2538): Flush status journal for 061adf5fb3bc2e9358b6d3d5a6c93c59: 2024-11-26T10:34:57,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:34:57,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105 2024-11-26T10:34:57,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4106): Remote procedure done, pid=105 2024-11-26T10:34:57,580 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=105, resume processing ppid=104 2024-11-26T10:34:57,580 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, ppid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.0410 sec 2024-11-26T10:34:57,580 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees in 3.0470 sec 2024-11-26T10:34:57,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:34:57,818 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 061adf5fb3bc2e9358b6d3d5a6c93c59 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-26T10:34:57,818 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 061adf5fb3bc2e9358b6d3d5a6c93c59, store=A 2024-11-26T10:34:57,818 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:57,819 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 061adf5fb3bc2e9358b6d3d5a6c93c59, store=B 2024-11-26T10:34:57,819 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:57,819 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
061adf5fb3bc2e9358b6d3d5a6c93c59, store=C 2024-11-26T10:34:57,819 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:57,825 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126ea9fa7b740434dbb8db73eda870a8b26_061adf5fb3bc2e9358b6d3d5a6c93c59 is 50, key is test_row_0/A:col10/1732617296695/Put/seqid=0 2024-11-26T10:34:57,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742169_1345 (size=14594) 2024-11-26T10:34:57,831 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:34:57,835 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126ea9fa7b740434dbb8db73eda870a8b26_061adf5fb3bc2e9358b6d3d5a6c93c59 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126ea9fa7b740434dbb8db73eda870a8b26_061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:34:57,836 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/bff4a61f3aa1462c8fef7e60a1579f7f, store: [table=TestAcidGuarantees family=A region=061adf5fb3bc2e9358b6d3d5a6c93c59] 2024-11-26T10:34:57,837 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/bff4a61f3aa1462c8fef7e60a1579f7f is 175, key is test_row_0/A:col10/1732617296695/Put/seqid=0 2024-11-26T10:34:57,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742170_1346 (size=39549) 2024-11-26T10:34:57,860 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:57,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51326 deadline: 1732617357854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:57,860 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:57,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51382 deadline: 1732617357855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:57,863 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:57,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51374 deadline: 1732617357858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:57,863 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:57,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1732617357858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:57,962 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:57,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51326 deadline: 1732617357961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:57,963 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:57,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51382 deadline: 1732617357961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:57,968 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:57,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51374 deadline: 1732617357964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:57,968 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:57,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1732617357964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:58,167 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:58,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51326 deadline: 1732617358165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:58,167 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:58,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51382 deadline: 1732617358165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:58,173 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:58,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51374 deadline: 1732617358169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:58,173 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:58,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1732617358170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:58,243 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=55, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/bff4a61f3aa1462c8fef7e60a1579f7f 2024-11-26T10:34:58,248 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/02a1a5cc4aa24081a78698339bdc901e is 50, key is test_row_0/B:col10/1732617296695/Put/seqid=0 2024-11-26T10:34:58,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742171_1347 (size=12001) 2024-11-26T10:34:58,472 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:58,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51326 deadline: 1732617358468, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:58,474 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:58,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51382 deadline: 1732617358469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:58,479 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:58,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51374 deadline: 1732617358476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:58,479 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:58,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1732617358476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:58,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-26T10:34:58,641 INFO [Thread-1539 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 104 completed 2024-11-26T10:34:58,642 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-26T10:34:58,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees 2024-11-26T10:34:58,643 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-26T10:34:58,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-26T10:34:58,643 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-26T10:34:58,643 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=107, ppid=106, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-26T10:34:58,652 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/02a1a5cc4aa24081a78698339bdc901e 2024-11-26T10:34:58,657 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/14aa4084f6ff4361bfa0113023ac3705 is 50, key is test_row_0/C:col10/1732617296695/Put/seqid=0 2024-11-26T10:34:58,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742172_1348 (size=12001) 
2024-11-26T10:34:58,722 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:58,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51346 deadline: 1732617358717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:58,722 DEBUG [Thread-1533 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4160 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59., hostname=ccf62758a0a5,45419,1732617185877, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at 
org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at 
org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-26T10:34:58,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-26T10:34:58,794 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:58,794 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-26T10:34:58,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:34:58,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. as already flushing 2024-11-26T10:34:58,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:34:58,795 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:58,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:58,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
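Around these entries the client-side RpcRetryingCallerImpl reports tries=6 of retries=16 against the same RegionTooBusyException before the AcidGuaranteesTestTool writer's HTable.put gives up. A rough sketch of a writer that retries with its own backoff once the failure surfaces, assuming the standard HBase 2.x client classes; the retry count, sleep times, and the class name BackoffWriter are illustrative only:

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffWriter {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Assumption: fewer client-internal retries than the 16 used by the test,
        // so the RegionTooBusyException surfaces to this loop sooner.
        conf.setInt("hbase.client.retries.number", 3);
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_2"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            for (int attempt = 0; attempt < 5; attempt++) {
                try {
                    table.put(put);
                    return; // write accepted
                } catch (IOException e) {
                    // The blocked region reports "Over memstore limit"; back off and
                    // give the flush shown in the log a chance to drain the memstore.
                    Thread.sleep(1000L * (attempt + 1));
                }
            }
        }
    }
}
```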
2024-11-26T10:34:58,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-26T10:34:58,946 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:58,947 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-26T10:34:58,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:34:58,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. as already flushing 2024-11-26T10:34:58,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:34:58,947 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:58,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:58,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:34:58,977 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:58,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51326 deadline: 1732617358973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:58,982 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:58,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51382 deadline: 1732617358977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:58,985 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:58,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51374 deadline: 1732617358980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:58,985 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:34:58,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1732617358981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:59,061 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/14aa4084f6ff4361bfa0113023ac3705 2024-11-26T10:34:59,064 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/bff4a61f3aa1462c8fef7e60a1579f7f as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/bff4a61f3aa1462c8fef7e60a1579f7f 2024-11-26T10:34:59,067 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/bff4a61f3aa1462c8fef7e60a1579f7f, entries=200, sequenceid=55, filesize=38.6 K 2024-11-26T10:34:59,067 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/02a1a5cc4aa24081a78698339bdc901e as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/02a1a5cc4aa24081a78698339bdc901e 2024-11-26T10:34:59,070 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/02a1a5cc4aa24081a78698339bdc901e, entries=150, sequenceid=55, filesize=11.7 K 2024-11-26T10:34:59,071 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/14aa4084f6ff4361bfa0113023ac3705 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/14aa4084f6ff4361bfa0113023ac3705 2024-11-26T10:34:59,075 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/14aa4084f6ff4361bfa0113023ac3705, entries=150, sequenceid=55, filesize=11.7 K 2024-11-26T10:34:59,075 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 061adf5fb3bc2e9358b6d3d5a6c93c59 in 1257ms, sequenceid=55, compaction requested=true 2024-11-26T10:34:59,075 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 061adf5fb3bc2e9358b6d3d5a6c93c59: 2024-11-26T10:34:59,075 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 061adf5fb3bc2e9358b6d3d5a6c93c59:A, priority=-2147483648, current under compaction store size is 1 2024-11-26T10:34:59,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:34:59,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 061adf5fb3bc2e9358b6d3d5a6c93c59:B, priority=-2147483648, current under compaction store size is 2 2024-11-26T10:34:59,076 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:34:59,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:34:59,076 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:34:59,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 061adf5fb3bc2e9358b6d3d5a6c93c59:C, priority=-2147483648, current under compaction store size is 3 2024-11-26T10:34:59,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:34:59,077 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:34:59,077 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101459 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:34:59,077 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): 061adf5fb3bc2e9358b6d3d5a6c93c59/A is initiating minor compaction (all files) 2024-11-26T10:34:59,077 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): 061adf5fb3bc2e9358b6d3d5a6c93c59/B is initiating minor compaction (all files) 2024-11-26T10:34:59,077 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 061adf5fb3bc2e9358b6d3d5a6c93c59/B in TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 
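The "Finished flush" entry above writes out ~67 KB while another ~141 KB of new edits has already accumulated, which is what keeps tripping the 512.0 K blocking limit in the RegionTooBusyException messages. In stock HBase that blocking limit is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, so the unusually small figure here points to a deliberately tiny flush size in the test configuration. A sketch with hypothetical values that would reproduce the 512 K limit:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitConfig {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Hypothetical values: a 128 KB flush size with the default multiplier of 4
        // yields the 512.0 K blocking limit seen in the log
        // (blocking limit = flush size * block multiplier).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long limit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("blocking limit = " + (limit / 1024) + " K");
    }
}
```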
2024-11-26T10:34:59,077 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 061adf5fb3bc2e9358b6d3d5a6c93c59/A in TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:34:59,077 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/b62599f6950b463491629d5adb37106d, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/cbd026c361de4f5880e3236435864ae0, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/bff4a61f3aa1462c8fef7e60a1579f7f] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp, totalSize=99.1 K 2024-11-26T10:34:59,077 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/17092df6d25449c9b3ab168a2aba3686, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/5dbbe565ff474de69031891656a36d87, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/02a1a5cc4aa24081a78698339bdc901e] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp, totalSize=35.2 K 2024-11-26T10:34:59,077 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:34:59,078 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 
files: [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/b62599f6950b463491629d5adb37106d, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/cbd026c361de4f5880e3236435864ae0, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/bff4a61f3aa1462c8fef7e60a1579f7f] 2024-11-26T10:34:59,078 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 17092df6d25449c9b3ab168a2aba3686, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=18, earliestPutTs=1732617294544 2024-11-26T10:34:59,078 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting b62599f6950b463491629d5adb37106d, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=18, earliestPutTs=1732617294544 2024-11-26T10:34:59,078 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 5dbbe565ff474de69031891656a36d87, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1732617294561 2024-11-26T10:34:59,078 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting cbd026c361de4f5880e3236435864ae0, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1732617294561 2024-11-26T10:34:59,078 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting bff4a61f3aa1462c8fef7e60a1579f7f, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732617296690 2024-11-26T10:34:59,078 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 02a1a5cc4aa24081a78698339bdc901e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732617296695 2024-11-26T10:34:59,083 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=061adf5fb3bc2e9358b6d3d5a6c93c59] 2024-11-26T10:34:59,084 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 061adf5fb3bc2e9358b6d3d5a6c93c59#B#compaction#300 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:34:59,085 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/05d36e347e7644e9a1b2b6c3dfda78ea is 50, key is test_row_0/B:col10/1732617296695/Put/seqid=0 2024-11-26T10:34:59,087 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411266e2a914986af4ab696e6026be296fe70_061adf5fb3bc2e9358b6d3d5a6c93c59 store=[table=TestAcidGuarantees family=A region=061adf5fb3bc2e9358b6d3d5a6c93c59] 2024-11-26T10:34:59,089 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411266e2a914986af4ab696e6026be296fe70_061adf5fb3bc2e9358b6d3d5a6c93c59, store=[table=TestAcidGuarantees family=A region=061adf5fb3bc2e9358b6d3d5a6c93c59] 2024-11-26T10:34:59,090 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411266e2a914986af4ab696e6026be296fe70_061adf5fb3bc2e9358b6d3d5a6c93c59 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=061adf5fb3bc2e9358b6d3d5a6c93c59] 2024-11-26T10:34:59,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742173_1349 (size=12104) 2024-11-26T10:34:59,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742174_1350 (size=4469) 2024-11-26T10:34:59,099 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:34:59,099 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-26T10:34:59,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 
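The flush handler above operates on the three column families A, B and C that TestAcidGuarantees writes in lockstep; the test's readers then verify that every family of a row carries a consistent value. A small sketch of such a read-back with the standard client API (row, family and qualifier names are taken from the log; the class name RowReadBack and the rest are illustrative):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RowReadBack {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Get get = new Get(Bytes.toBytes("test_row_0"));
            get.addFamily(Bytes.toBytes("A"));
            get.addFamily(Bytes.toBytes("B"));
            get.addFamily(Bytes.toBytes("C"));
            Result result = table.get(get); // a single Get is atomic across the row's families
            byte[] a = result.getValue(Bytes.toBytes("A"), Bytes.toBytes("col10"));
            byte[] b = result.getValue(Bytes.toBytes("B"), Bytes.toBytes("col10"));
            byte[] c = result.getValue(Bytes.toBytes("C"), Bytes.toBytes("col10"));
            System.out.println("row consistent across families: "
                + (Bytes.equals(a, b) && Bytes.equals(b, c)));
        }
    }
}
```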
2024-11-26T10:34:59,100 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2837): Flushing 061adf5fb3bc2e9358b6d3d5a6c93c59 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-26T10:34:59,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 061adf5fb3bc2e9358b6d3d5a6c93c59, store=A 2024-11-26T10:34:59,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:59,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 061adf5fb3bc2e9358b6d3d5a6c93c59, store=B 2024-11-26T10:34:59,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:59,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 061adf5fb3bc2e9358b6d3d5a6c93c59, store=C 2024-11-26T10:34:59,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:34:59,102 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 061adf5fb3bc2e9358b6d3d5a6c93c59#A#compaction#299 average throughput is 1.29 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:34:59,103 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/40a2119b2b5f431fb3304d72b53cd2a1 is 175, key is test_row_0/A:col10/1732617296695/Put/seqid=0 2024-11-26T10:34:59,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112664cb4baa430b4c1a9fc4c60ad4fae23d_061adf5fb3bc2e9358b6d3d5a6c93c59 is 50, key is test_row_0/A:col10/1732617297857/Put/seqid=0 2024-11-26T10:34:59,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742175_1351 (size=31058) 2024-11-26T10:34:59,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742176_1352 (size=12154) 2024-11-26T10:34:59,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-26T10:34:59,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:34:59,520 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112664cb4baa430b4c1a9fc4c60ad4fae23d_061adf5fb3bc2e9358b6d3d5a6c93c59 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112664cb4baa430b4c1a9fc4c60ad4fae23d_061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:34:59,520 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/40a2119b2b5f431fb3304d72b53cd2a1 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/40a2119b2b5f431fb3304d72b53cd2a1 2024-11-26T10:34:59,520 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/05d36e347e7644e9a1b2b6c3dfda78ea as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/05d36e347e7644e9a1b2b6c3dfda78ea 2024-11-26T10:34:59,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/35af9bfb4f8f40008369bf8b3345bfe0, store: [table=TestAcidGuarantees family=A region=061adf5fb3bc2e9358b6d3d5a6c93c59] 2024-11-26T10:34:59,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/35af9bfb4f8f40008369bf8b3345bfe0 is 175, key is test_row_0/A:col10/1732617297857/Put/seqid=0 2024-11-26T10:34:59,527 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 061adf5fb3bc2e9358b6d3d5a6c93c59/B of 061adf5fb3bc2e9358b6d3d5a6c93c59 into 05d36e347e7644e9a1b2b6c3dfda78ea(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:34:59,527 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 061adf5fb3bc2e9358b6d3d5a6c93c59/A of 061adf5fb3bc2e9358b6d3d5a6c93c59 into 40a2119b2b5f431fb3304d72b53cd2a1(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:34:59,527 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 061adf5fb3bc2e9358b6d3d5a6c93c59: 2024-11-26T10:34:59,527 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 061adf5fb3bc2e9358b6d3d5a6c93c59: 2024-11-26T10:34:59,527 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59., storeName=061adf5fb3bc2e9358b6d3d5a6c93c59/B, priority=13, startTime=1732617299076; duration=0sec 2024-11-26T10:34:59,527 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59., storeName=061adf5fb3bc2e9358b6d3d5a6c93c59/A, priority=13, startTime=1732617299075; duration=0sec 2024-11-26T10:34:59,527 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:34:59,527 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 061adf5fb3bc2e9358b6d3d5a6c93c59:A 2024-11-26T10:34:59,527 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:34:59,527 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:34:59,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742177_1353 (size=30955) 2024-11-26T10:34:59,527 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 
061adf5fb3bc2e9358b6d3d5a6c93c59:B 2024-11-26T10:34:59,531 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=79, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/35af9bfb4f8f40008369bf8b3345bfe0 2024-11-26T10:34:59,531 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:34:59,531 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): 061adf5fb3bc2e9358b6d3d5a6c93c59/C is initiating minor compaction (all files) 2024-11-26T10:34:59,531 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 061adf5fb3bc2e9358b6d3d5a6c93c59/C in TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:34:59,531 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/0d3987e8aba3495a93b44b58f7e98d05, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/1a9c2e8ea84542fa8d6718e2a07015f9, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/14aa4084f6ff4361bfa0113023ac3705] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp, totalSize=35.2 K 2024-11-26T10:34:59,532 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0d3987e8aba3495a93b44b58f7e98d05, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=18, earliestPutTs=1732617294544 2024-11-26T10:34:59,532 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1a9c2e8ea84542fa8d6718e2a07015f9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1732617294561 2024-11-26T10:34:59,532 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 14aa4084f6ff4361bfa0113023ac3705, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732617296695 2024-11-26T10:34:59,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/2c796c6bbed440c896a0ddb406576c5f is 50, key is test_row_0/B:col10/1732617297857/Put/seqid=0 2024-11-26T10:34:59,541 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 061adf5fb3bc2e9358b6d3d5a6c93c59#C#compaction#303 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:34:59,541 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/2a30f70605934fdd9352d0b39dd54931 is 50, key is test_row_0/C:col10/1732617296695/Put/seqid=0 2024-11-26T10:34:59,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742178_1354 (size=12001) 2024-11-26T10:34:59,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742179_1355 (size=12104) 2024-11-26T10:34:59,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-26T10:34:59,947 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/2c796c6bbed440c896a0ddb406576c5f 2024-11-26T10:34:59,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/e8e7f258e39e41348242f3c76ede2e4e is 50, key is test_row_0/C:col10/1732617297857/Put/seqid=0 2024-11-26T10:34:59,958 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/2a30f70605934fdd9352d0b39dd54931 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/2a30f70605934fdd9352d0b39dd54931 2024-11-26T10:34:59,962 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 061adf5fb3bc2e9358b6d3d5a6c93c59/C of 061adf5fb3bc2e9358b6d3d5a6c93c59 into 2a30f70605934fdd9352d0b39dd54931(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
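After the flushes, the compaction threads above fold the three small HFiles of each store back into one, keeping the store file count well under the blocking threshold of 16 reported by the compaction policy. For completeness, a sketch of requesting the same work from a client and polling its progress through the Admin API (assuming HBase 2.x; the polling interval and class name CompactAndWait are arbitrary):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactAndWait {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            admin.compact(table); // asynchronously request a minor compaction of every store
            // Poll until the region servers report no compaction running for the table.
            while (admin.getCompactionState(table) != CompactionState.NONE) {
                Thread.sleep(500);
            }
        }
    }
}
```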
2024-11-26T10:34:59,962 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 061adf5fb3bc2e9358b6d3d5a6c93c59: 2024-11-26T10:34:59,962 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59., storeName=061adf5fb3bc2e9358b6d3d5a6c93c59/C, priority=13, startTime=1732617299076; duration=0sec 2024-11-26T10:34:59,962 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:34:59,962 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 061adf5fb3bc2e9358b6d3d5a6c93c59:C 2024-11-26T10:34:59,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742180_1356 (size=12001) 2024-11-26T10:34:59,982 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. as already flushing 2024-11-26T10:34:59,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:35:00,000 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:00,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51326 deadline: 1732617359995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:00,000 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:00,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51374 deadline: 1732617359996, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:00,003 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:00,003 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:00,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1732617359998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:00,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51382 deadline: 1732617359999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:00,101 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:00,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51326 deadline: 1732617360101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:00,104 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:00,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51374 deadline: 1732617360101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:00,105 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:00,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1732617360104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:00,105 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:00,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51382 deadline: 1732617360104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:00,306 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:00,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51326 deadline: 1732617360303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:00,308 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:00,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51374 deadline: 1732617360306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:00,309 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:00,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1732617360306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:00,309 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-26T10:35:00,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51382 deadline: 1732617360306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877
2024-11-26T10:35:00,365 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/e8e7f258e39e41348242f3c76ede2e4e
2024-11-26T10:35:00,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/35af9bfb4f8f40008369bf8b3345bfe0 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/35af9bfb4f8f40008369bf8b3345bfe0
2024-11-26T10:35:00,375 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/35af9bfb4f8f40008369bf8b3345bfe0, entries=150, sequenceid=79, filesize=30.2 K
2024-11-26T10:35:00,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/2c796c6bbed440c896a0ddb406576c5f as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/2c796c6bbed440c896a0ddb406576c5f
2024-11-26T10:35:00,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-26T10:35:00,378 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,379 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/2c796c6bbed440c896a0ddb406576c5f, entries=150, sequenceid=79, filesize=11.7 K 2024-11-26T10:35:00,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/e8e7f258e39e41348242f3c76ede2e4e as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/e8e7f258e39e41348242f3c76ede2e4e 2024-11-26T10:35:00,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-26T10:35:00,385 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/e8e7f258e39e41348242f3c76ede2e4e, entries=150, sequenceid=79, filesize=11.7 K
2024-11-26T10:35:00,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-26T10:35:00,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-26T10:35:00,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-26T10:35:00,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-26T10:35:00,385 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 061adf5fb3bc2e9358b6d3d5a6c93c59 in 1286ms, sequenceid=79, compaction requested=false
2024-11-26T10:35:00,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-26T10:35:00,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2538): Flush status journal for 061adf5fb3bc2e9358b6d3d5a6c93c59:
2024-11-26T10:35:00,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.
2024-11-26T10:35:00,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=107 2024-11-26T10:35:00,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4106): Remote procedure done, pid=107 2024-11-26T10:35:00,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-26T10:35:00,388 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=107, resume processing ppid=106
2024-11-26T10:35:00,388 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, ppid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7440 sec
2024-11-26T10:35:00,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-26T10:35:00,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-26T10:35:00,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-26T10:35:00,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-26T10:35:00,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-26T10:35:00,389 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees in 1.7460 sec
2024-11-26T10:35:00,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-26T10:35:00,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-26T10:35:00,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-26T10:35:00,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-26T10:35:00,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-26T10:35:00,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-26T10:35:00,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... DEBUG storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker, repeated many times by the RpcServer.default.FPBQ.Fifo handler threads 0-2 (port=45419) between 2024-11-26T10:35:00,593 and 10:35:00,634; duplicate records collapsed ...]
2024-11-26T10:35:00,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 061adf5fb3bc2e9358b6d3d5a6c93c59
2024-11-26T10:35:00,634 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 061adf5fb3bc2e9358b6d3d5a6c93c59 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB
2024-11-26T10:35:00,635 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 061adf5fb3bc2e9358b6d3d5a6c93c59, store=A
2024-11-26T10:35:00,635 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-26T10:35:00,635 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 061adf5fb3bc2e9358b6d3d5a6c93c59, store=B
2024-11-26T10:35:00,635 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-26T10:35:00,635 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 061adf5fb3bc2e9358b6d3d5a6c93c59, store=C
2024-11-26T10:35:00,635 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
[... same StoreFileTrackerFactory DEBUG message repeated by the RpcServer handler threads between 10:35:00,635 and 10:35:00,644; duplicate records collapsed ...]
2024-11-26T10:35:00,644 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126f9b8648ea576459bb86169e496e88ba5_061adf5fb3bc2e9358b6d3d5a6c93c59 is 50, key is test_row_0/A:col10/1732617300633/Put/seqid=0
[... same StoreFileTrackerFactory DEBUG message repeated by the RpcServer handler threads between 10:35:00,644 and 10:35:00,652; duplicate records collapsed ...]
2024-11-26T10:35:00,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742181_1357 (size=12154)
2024-11-26T10:35:00,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-26T10:35:00,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-26T10:35:00,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:00,683 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:00,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51374 deadline: 1732617360678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:00,689 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:00,689 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:00,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1732617360682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:00,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51382 deadline: 1732617360682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:00,689 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:00,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51326 deadline: 1732617360684, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:00,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-26T10:35:00,746 INFO [Thread-1539 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 106 completed 2024-11-26T10:35:00,747 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-26T10:35:00,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees 2024-11-26T10:35:00,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-26T10:35:00,748 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-26T10:35:00,748 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-26T10:35:00,749 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-26T10:35:00,785 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:00,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51374 deadline: 1732617360784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:00,793 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:00,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51382 deadline: 1732617360790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:00,794 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:00,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1732617360790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:00,794 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:00,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51326 deadline: 1732617360790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:00,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-26T10:35:00,900 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:00,900 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-26T10:35:00,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:00,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. as already flushing 2024-11-26T10:35:00,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:00,900 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:35:00,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:00,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:00,990 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:00,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51374 deadline: 1732617360987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:00,998 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:00,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51326 deadline: 1732617360995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:00,998 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:00,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51382 deadline: 1732617360995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:00,999 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:00,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1732617360995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:01,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-26T10:35:01,052 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:01,052 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:01,052 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-26T10:35:01,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:01,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. as already flushing 2024-11-26T10:35:01,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:01,053 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:01,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:01,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:01,056 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126f9b8648ea576459bb86169e496e88ba5_061adf5fb3bc2e9358b6d3d5a6c93c59 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126f9b8648ea576459bb86169e496e88ba5_061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:35:01,056 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/9e77a53747b04f6e936732cb2781a10c, store: [table=TestAcidGuarantees family=A region=061adf5fb3bc2e9358b6d3d5a6c93c59] 2024-11-26T10:35:01,057 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/9e77a53747b04f6e936732cb2781a10c is 175, key is test_row_0/A:col10/1732617300633/Put/seqid=0 2024-11-26T10:35:01,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742182_1358 (size=30951) 2024-11-26T10:35:01,061 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=95, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/9e77a53747b04f6e936732cb2781a10c 2024-11-26T10:35:01,066 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the 
biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/ba51816472864df3ad6c092e6a88a59e is 50, key is test_row_0/B:col10/1732617300633/Put/seqid=0 2024-11-26T10:35:01,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742183_1359 (size=9657) 2024-11-26T10:35:01,205 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:01,205 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-26T10:35:01,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:01,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. as already flushing 2024-11-26T10:35:01,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:01,206 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:01,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:01,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:01,297 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:01,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51374 deadline: 1732617361293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:01,301 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:01,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51326 deadline: 1732617361299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:01,301 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:01,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51382 deadline: 1732617361299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:01,302 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:01,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1732617361300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:01,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-26T10:35:01,357 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:01,358 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-26T10:35:01,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:01,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. as already flushing 2024-11-26T10:35:01,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:01,358 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:01,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:01,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:01,470 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=95 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/ba51816472864df3ad6c092e6a88a59e 2024-11-26T10:35:01,475 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/191c95bbc6e8418f89c1317b94c5e147 is 50, key is test_row_0/C:col10/1732617300633/Put/seqid=0 2024-11-26T10:35:01,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742184_1360 (size=9657) 2024-11-26T10:35:01,510 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:01,510 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-26T10:35:01,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:01,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. as already flushing 2024-11-26T10:35:01,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:01,511 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:01,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:01,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:01,662 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:01,663 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-26T10:35:01,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:01,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. as already flushing 2024-11-26T10:35:01,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:01,663 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:01,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:01,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:01,802 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:01,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51374 deadline: 1732617361802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:01,805 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:01,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51382 deadline: 1732617361802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:01,807 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:01,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51326 deadline: 1732617361806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:01,810 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:01,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1732617361806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:01,815 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:01,815 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-26T10:35:01,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:01,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. as already flushing 2024-11-26T10:35:01,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:01,816 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
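Note: the RegionTooBusyException warnings above show the region server rejecting client Mutate calls once the region's memstore passes its 512.0 K blocking limit, while the pid=109 flush procedure keeps failing with "NOT flushing ... as already flushing" until the in-progress flush drains. Below is a minimal client-side sketch of how a writer might react to that condition; it assumes a standard HBase 2.x client on the classpath, reuses the table, row, and column names from this test, and the retry count and backoff values are illustrative, not anything HBase prescribes.

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      long backoffMs = 100;                 // illustrative starting backoff, not an HBase default
      int maxAttempts = 5;                  // illustrative
      for (int attempt = 1; attempt <= maxAttempts; attempt++) {
        try {
          table.put(put);                   // the HBase client also retries internally
          break;                            // write accepted
        } catch (IOException busy) {
          // In this log the underlying cause is RegionTooBusyException ("Over memstore limit");
          // by the time it reaches application code it may be wrapped by the client's retry layer.
          if (attempt == maxAttempts) {
            throw busy;
          }
          Thread.sleep(backoffMs);          // give the region time to finish flushing
          backoffMs *= 2;
        }
      }
    }
  }
}
```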
2024-11-26T10:35:01,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:01,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:01,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-26T10:35:01,879 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=95 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/191c95bbc6e8418f89c1317b94c5e147 2024-11-26T10:35:01,882 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/9e77a53747b04f6e936732cb2781a10c as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/9e77a53747b04f6e936732cb2781a10c 2024-11-26T10:35:01,885 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/9e77a53747b04f6e936732cb2781a10c, entries=150, sequenceid=95, filesize=30.2 K 2024-11-26T10:35:01,886 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/ba51816472864df3ad6c092e6a88a59e as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/ba51816472864df3ad6c092e6a88a59e 2024-11-26T10:35:01,889 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/ba51816472864df3ad6c092e6a88a59e, entries=100, sequenceid=95, filesize=9.4 K 2024-11-26T10:35:01,891 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/191c95bbc6e8418f89c1317b94c5e147 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/191c95bbc6e8418f89c1317b94c5e147 2024-11-26T10:35:01,895 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/191c95bbc6e8418f89c1317b94c5e147, entries=100, sequenceid=95, filesize=9.4 K 2024-11-26T10:35:01,895 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 061adf5fb3bc2e9358b6d3d5a6c93c59 in 1261ms, sequenceid=95, compaction requested=true 2024-11-26T10:35:01,896 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 061adf5fb3bc2e9358b6d3d5a6c93c59:
2024-11-26T10:35:01,896 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 061adf5fb3bc2e9358b6d3d5a6c93c59:A, priority=-2147483648, current under compaction store size is 1 2024-11-26T10:35:01,896 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:35:01,896 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:35:01,896 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 061adf5fb3bc2e9358b6d3d5a6c93c59:B, priority=-2147483648, current under compaction store size is 2 2024-11-26T10:35:01,896 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:35:01,896 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:35:01,896 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 061adf5fb3bc2e9358b6d3d5a6c93c59:C, priority=-2147483648, current under compaction store size is 3 2024-11-26T10:35:01,896 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:35:01,897 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92964 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:35:01,897 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33762 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:35:01,897 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): 061adf5fb3bc2e9358b6d3d5a6c93c59/A is initiating minor compaction (all files) 2024-11-26T10:35:01,897 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): 061adf5fb3bc2e9358b6d3d5a6c93c59/B is initiating minor compaction (all files) 2024-11-26T10:35:01,897 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 061adf5fb3bc2e9358b6d3d5a6c93c59/A in TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:01,897 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 061adf5fb3bc2e9358b6d3d5a6c93c59/B in TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.
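Note: the ExploringCompactionPolicy entries above report selecting all 3 eligible store files (92964 bytes for family A, 33762 bytes for family B) for a minor compaction. The following is a toy sketch of the size-ratio test behind that kind of selection; it is not HBase's actual implementation, the 1.2 ratio mirrors what I believe is the default hbase.hstore.compaction.ratio, and the byte sizes are rough approximations of the 30.3 K/30.2 K/30.2 K family-A figures reported in the Compactor lines further down.

```java
import java.util.ArrayList;
import java.util.List;

public class RatioSelectionSketch {
  // Simplified size-ratio check: a file stays in the candidate set if it is
  // no larger than (sum of the other candidates) * ratio. This is a toy
  // version of the test the exploring compaction policy applies per window.
  static List<Long> selectForCompaction(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    List<Long> selected = new ArrayList<>();
    for (long size : fileSizes) {
      if (size <= (total - size) * ratio) {
        selected.add(size);
      }
    }
    return selected;
  }

  public static void main(String[] args) {
    // Approximate byte sizes of the three family-A store files (~30 K each); illustrative only.
    List<Long> sizes = List.of(31_000L, 30_900L, 30_900L);
    // Prints all three sizes: every file passes the ratio check, matching the 3-of-3 selection above.
    System.out.println(selectForCompaction(sizes, 1.2));
  }
}
```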
2024-11-26T10:35:01,897 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/40a2119b2b5f431fb3304d72b53cd2a1, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/35af9bfb4f8f40008369bf8b3345bfe0, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/9e77a53747b04f6e936732cb2781a10c] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp, totalSize=90.8 K 2024-11-26T10:35:01,897 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/05d36e347e7644e9a1b2b6c3dfda78ea, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/2c796c6bbed440c896a0ddb406576c5f, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/ba51816472864df3ad6c092e6a88a59e] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp, totalSize=33.0 K 2024-11-26T10:35:01,897 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:01,897 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 
files: [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/40a2119b2b5f431fb3304d72b53cd2a1, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/35af9bfb4f8f40008369bf8b3345bfe0, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/9e77a53747b04f6e936732cb2781a10c] 2024-11-26T10:35:01,897 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 05d36e347e7644e9a1b2b6c3dfda78ea, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732617296695 2024-11-26T10:35:01,897 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 40a2119b2b5f431fb3304d72b53cd2a1, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732617296695 2024-11-26T10:35:01,897 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 2c796c6bbed440c896a0ddb406576c5f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1732617297839 2024-11-26T10:35:01,898 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 35af9bfb4f8f40008369bf8b3345bfe0, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1732617297839 2024-11-26T10:35:01,898 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting ba51816472864df3ad6c092e6a88a59e, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1732617299998 2024-11-26T10:35:01,898 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9e77a53747b04f6e936732cb2781a10c, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1732617299997 2024-11-26T10:35:01,905 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 061adf5fb3bc2e9358b6d3d5a6c93c59#B#compaction#308 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:35:01,905 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/872cc1746c354434aebec3b77df6ef76 is 50, key is test_row_0/B:col10/1732617300633/Put/seqid=0 2024-11-26T10:35:01,907 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=061adf5fb3bc2e9358b6d3d5a6c93c59] 2024-11-26T10:35:01,911 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411263b6dc43de38a4698ac2a56728e25754d_061adf5fb3bc2e9358b6d3d5a6c93c59 store=[table=TestAcidGuarantees family=A region=061adf5fb3bc2e9358b6d3d5a6c93c59] 2024-11-26T10:35:01,913 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411263b6dc43de38a4698ac2a56728e25754d_061adf5fb3bc2e9358b6d3d5a6c93c59, store=[table=TestAcidGuarantees family=A region=061adf5fb3bc2e9358b6d3d5a6c93c59] 2024-11-26T10:35:01,913 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411263b6dc43de38a4698ac2a56728e25754d_061adf5fb3bc2e9358b6d3d5a6c93c59 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=061adf5fb3bc2e9358b6d3d5a6c93c59] 2024-11-26T10:35:01,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742185_1361 (size=12207) 2024-11-26T10:35:01,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742186_1362 (size=4469) 2024-11-26T10:35:01,967 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:01,967 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-26T10:35:01,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 
2024-11-26T10:35:01,968 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2837): Flushing 061adf5fb3bc2e9358b6d3d5a6c93c59 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-26T10:35:01,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 061adf5fb3bc2e9358b6d3d5a6c93c59, store=A 2024-11-26T10:35:01,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:01,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 061adf5fb3bc2e9358b6d3d5a6c93c59, store=B 2024-11-26T10:35:01,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:01,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 061adf5fb3bc2e9358b6d3d5a6c93c59, store=C 2024-11-26T10:35:01,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:01,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112698a815f535b3409ca1a136207b2ca8d9_061adf5fb3bc2e9358b6d3d5a6c93c59 is 50, key is test_row_0/A:col10/1732617300683/Put/seqid=0 2024-11-26T10:35:01,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742187_1363 (size=12154) 2024-11-26T10:35:01,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:01,980 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112698a815f535b3409ca1a136207b2ca8d9_061adf5fb3bc2e9358b6d3d5a6c93c59 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112698a815f535b3409ca1a136207b2ca8d9_061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:35:01,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/ae67b366b4ee4bc38f59f6a6f93b0737, store: [table=TestAcidGuarantees family=A region=061adf5fb3bc2e9358b6d3d5a6c93c59] 2024-11-26T10:35:01,981 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/ae67b366b4ee4bc38f59f6a6f93b0737 is 175, key is test_row_0/A:col10/1732617300683/Put/seqid=0 2024-11-26T10:35:01,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742188_1364 (size=30955) 2024-11-26T10:35:02,320 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/872cc1746c354434aebec3b77df6ef76 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/872cc1746c354434aebec3b77df6ef76 2024-11-26T10:35:02,323 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 061adf5fb3bc2e9358b6d3d5a6c93c59#A#compaction#309 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:35:02,323 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 061adf5fb3bc2e9358b6d3d5a6c93c59/B of 061adf5fb3bc2e9358b6d3d5a6c93c59 into 872cc1746c354434aebec3b77df6ef76(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:35:02,323 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 061adf5fb3bc2e9358b6d3d5a6c93c59: 2024-11-26T10:35:02,323 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59., storeName=061adf5fb3bc2e9358b6d3d5a6c93c59/B, priority=13, startTime=1732617301896; duration=0sec 2024-11-26T10:35:02,323 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:35:02,323 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 061adf5fb3bc2e9358b6d3d5a6c93c59:B 2024-11-26T10:35:02,324 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:35:02,324 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/75919e567cc54c34a6eeea9b4ef17456 is 175, key is test_row_0/A:col10/1732617300633/Put/seqid=0 2024-11-26T10:35:02,324 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33762 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:35:02,324 DEBUG 
[RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): 061adf5fb3bc2e9358b6d3d5a6c93c59/C is initiating minor compaction (all files) 2024-11-26T10:35:02,324 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 061adf5fb3bc2e9358b6d3d5a6c93c59/C in TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:02,324 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/2a30f70605934fdd9352d0b39dd54931, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/e8e7f258e39e41348242f3c76ede2e4e, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/191c95bbc6e8418f89c1317b94c5e147] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp, totalSize=33.0 K 2024-11-26T10:35:02,325 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 2a30f70605934fdd9352d0b39dd54931, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732617296695 2024-11-26T10:35:02,327 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting e8e7f258e39e41348242f3c76ede2e4e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1732617297839 2024-11-26T10:35:02,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742189_1365 (size=31268) 2024-11-26T10:35:02,327 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 191c95bbc6e8418f89c1317b94c5e147, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1732617299998 2024-11-26T10:35:02,334 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 061adf5fb3bc2e9358b6d3d5a6c93c59#C#compaction#311 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:35:02,334 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/d93fc883278541cdb1d803cae1d13895 is 50, key is test_row_0/C:col10/1732617300633/Put/seqid=0 2024-11-26T10:35:02,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742190_1366 (size=12207) 2024-11-26T10:35:02,385 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=118, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/ae67b366b4ee4bc38f59f6a6f93b0737 2024-11-26T10:35:02,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/13a4f8a32f6a4dbb812656ef8ab6d702 is 50, key is test_row_0/B:col10/1732617300683/Put/seqid=0 2024-11-26T10:35:02,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742191_1367 (size=12001) 2024-11-26T10:35:02,396 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/13a4f8a32f6a4dbb812656ef8ab6d702 2024-11-26T10:35:02,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/9dc5468999a64de395a7ad570ea7d024 is 50, key is test_row_0/C:col10/1732617300683/Put/seqid=0 2024-11-26T10:35:02,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742192_1368 (size=12001) 2024-11-26T10:35:02,730 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/75919e567cc54c34a6eeea9b4ef17456 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/75919e567cc54c34a6eeea9b4ef17456 2024-11-26T10:35:02,734 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 061adf5fb3bc2e9358b6d3d5a6c93c59/A of 061adf5fb3bc2e9358b6d3d5a6c93c59 into 75919e567cc54c34a6eeea9b4ef17456(size=30.5 K), total size for store is 30.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-26T10:35:02,734 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 061adf5fb3bc2e9358b6d3d5a6c93c59: 2024-11-26T10:35:02,734 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59., storeName=061adf5fb3bc2e9358b6d3d5a6c93c59/A, priority=13, startTime=1732617301896; duration=0sec 2024-11-26T10:35:02,734 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:35:02,734 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 061adf5fb3bc2e9358b6d3d5a6c93c59:A 2024-11-26T10:35:02,741 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/d93fc883278541cdb1d803cae1d13895 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/d93fc883278541cdb1d803cae1d13895 2024-11-26T10:35:02,745 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 061adf5fb3bc2e9358b6d3d5a6c93c59/C of 061adf5fb3bc2e9358b6d3d5a6c93c59 into d93fc883278541cdb1d803cae1d13895(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:35:02,745 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 061adf5fb3bc2e9358b6d3d5a6c93c59: 2024-11-26T10:35:02,745 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59., storeName=061adf5fb3bc2e9358b6d3d5a6c93c59/C, priority=13, startTime=1732617301896; duration=0sec 2024-11-26T10:35:02,745 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:35:02,745 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 061adf5fb3bc2e9358b6d3d5a6c93c59:C 2024-11-26T10:35:02,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:35:02,760 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. as already flushing 2024-11-26T10:35:02,793 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:02,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51346 deadline: 1732617362790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:02,805 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/9dc5468999a64de395a7ad570ea7d024 2024-11-26T10:35:02,806 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:02,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51374 deadline: 1732617362803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:02,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/ae67b366b4ee4bc38f59f6a6f93b0737 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/ae67b366b4ee4bc38f59f6a6f93b0737 2024-11-26T10:35:02,810 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:02,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51382 deadline: 1732617362807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:02,811 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/ae67b366b4ee4bc38f59f6a6f93b0737, entries=150, sequenceid=118, filesize=30.2 K 2024-11-26T10:35:02,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/13a4f8a32f6a4dbb812656ef8ab6d702 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/13a4f8a32f6a4dbb812656ef8ab6d702 2024-11-26T10:35:02,815 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/13a4f8a32f6a4dbb812656ef8ab6d702, entries=150, sequenceid=118, filesize=11.7 K 2024-11-26T10:35:02,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/9dc5468999a64de395a7ad570ea7d024 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/9dc5468999a64de395a7ad570ea7d024 2024-11-26T10:35:02,817 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:02,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1732617362814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:02,819 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/9dc5468999a64de395a7ad570ea7d024, entries=150, sequenceid=118, filesize=11.7 K 2024-11-26T10:35:02,820 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 061adf5fb3bc2e9358b6d3d5a6c93c59 in 851ms, sequenceid=118, compaction requested=false 2024-11-26T10:35:02,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2538): Flush status journal for 061adf5fb3bc2e9358b6d3d5a6c93c59: 2024-11-26T10:35:02,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 
2024-11-26T10:35:02,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=109 2024-11-26T10:35:02,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4106): Remote procedure done, pid=109 2024-11-26T10:35:02,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:35:02,821 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 061adf5fb3bc2e9358b6d3d5a6c93c59 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-26T10:35:02,821 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 061adf5fb3bc2e9358b6d3d5a6c93c59, store=A 2024-11-26T10:35:02,821 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:02,821 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 061adf5fb3bc2e9358b6d3d5a6c93c59, store=B 2024-11-26T10:35:02,821 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:02,821 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 061adf5fb3bc2e9358b6d3d5a6c93c59, store=C 2024-11-26T10:35:02,821 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:02,822 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=109, resume processing ppid=108 2024-11-26T10:35:02,822 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0710 sec 2024-11-26T10:35:02,823 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees in 2.0750 sec 2024-11-26T10:35:02,826 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126793d905c86bb4fa084489a3b1c6cf3d6_061adf5fb3bc2e9358b6d3d5a6c93c59 is 50, key is test_row_0/A:col10/1732617302762/Put/seqid=0 2024-11-26T10:35:02,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742193_1369 (size=14744) 2024-11-26T10:35:02,830 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:02,832 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126793d905c86bb4fa084489a3b1c6cf3d6_061adf5fb3bc2e9358b6d3d5a6c93c59 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126793d905c86bb4fa084489a3b1c6cf3d6_061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:35:02,833 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/694ada3d618745a59ccde96d6178faaf, store: [table=TestAcidGuarantees family=A region=061adf5fb3bc2e9358b6d3d5a6c93c59] 2024-11-26T10:35:02,833 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/694ada3d618745a59ccde96d6178faaf is 175, key is test_row_0/A:col10/1732617302762/Put/seqid=0 2024-11-26T10:35:02,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742194_1370 (size=39699) 2024-11-26T10:35:02,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-26T10:35:02,851 INFO [Thread-1539 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 108 completed 2024-11-26T10:35:02,852 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-26T10:35:02,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees 2024-11-26T10:35:02,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-26T10:35:02,853 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-26T10:35:02,854 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-26T10:35:02,854 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-26T10:35:02,926 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:02,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51326 deadline: 1732617362919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:02,929 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:02,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51346 deadline: 1732617362923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:02,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-26T10:35:03,005 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:03,005 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-26T10:35:03,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:03,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. as already flushing 2024-11-26T10:35:03,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:03,005 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:35:03,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:03,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:03,032 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:03,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51326 deadline: 1732617363027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:03,032 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:03,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51346 deadline: 1732617363030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:03,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-26T10:35:03,157 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:03,157 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-26T10:35:03,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:03,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. as already flushing 2024-11-26T10:35:03,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:03,158 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:35:03,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:03,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:03,236 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:03,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51326 deadline: 1732617363233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:03,236 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:03,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51346 deadline: 1732617363233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:03,237 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=135, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/694ada3d618745a59ccde96d6178faaf 2024-11-26T10:35:03,243 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/10fde9c06b124c3ca408be76b96ee11b is 50, key is test_row_0/B:col10/1732617302762/Put/seqid=0 2024-11-26T10:35:03,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742195_1371 (size=12101) 2024-11-26T10:35:03,309 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:03,310 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-26T10:35:03,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:03,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. as already flushing 2024-11-26T10:35:03,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:03,310 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:03,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:03,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:03,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-26T10:35:03,462 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:03,462 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-26T10:35:03,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:03,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. as already flushing 2024-11-26T10:35:03,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:03,462 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:03,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:03,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:03,541 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:03,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51346 deadline: 1732617363538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:03,542 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:03,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51326 deadline: 1732617363539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:03,614 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:03,614 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-26T10:35:03,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 
2024-11-26T10:35:03,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. as already flushing 2024-11-26T10:35:03,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:03,614 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:03,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:35:03,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:35:03,647 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=135 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/10fde9c06b124c3ca408be76b96ee11b 2024-11-26T10:35:03,653 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/8743fcf13e8a46028a993f472a3ccbf3 is 50, key is test_row_0/C:col10/1732617302762/Put/seqid=0 2024-11-26T10:35:03,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742196_1372 (size=12101) 2024-11-26T10:35:03,766 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:03,766 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-26T10:35:03,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:03,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. as already flushing 2024-11-26T10:35:03,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:03,766 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:35:03,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:03,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:03,918 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:03,919 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-26T10:35:03,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:03,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. as already flushing 2024-11-26T10:35:03,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:03,919 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:03,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:03,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:03,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-26T10:35:04,046 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:04,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51346 deadline: 1732617364043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:04,046 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:04,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51326 deadline: 1732617364044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:04,059 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=135 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/8743fcf13e8a46028a993f472a3ccbf3 2024-11-26T10:35:04,062 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/694ada3d618745a59ccde96d6178faaf as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/694ada3d618745a59ccde96d6178faaf 2024-11-26T10:35:04,065 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/694ada3d618745a59ccde96d6178faaf, entries=200, sequenceid=135, filesize=38.8 K 2024-11-26T10:35:04,065 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/10fde9c06b124c3ca408be76b96ee11b as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/10fde9c06b124c3ca408be76b96ee11b 2024-11-26T10:35:04,069 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/10fde9c06b124c3ca408be76b96ee11b, entries=150, sequenceid=135, filesize=11.8 K 2024-11-26T10:35:04,069 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/8743fcf13e8a46028a993f472a3ccbf3 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/8743fcf13e8a46028a993f472a3ccbf3 2024-11-26T10:35:04,070 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 
2024-11-26T10:35:04,071 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-26T10:35:04,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:04,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. as already flushing 2024-11-26T10:35:04,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:04,072 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:04,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:35:04,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:35:04,080 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/8743fcf13e8a46028a993f472a3ccbf3, entries=150, sequenceid=135, filesize=11.8 K 2024-11-26T10:35:04,080 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 061adf5fb3bc2e9358b6d3d5a6c93c59 in 1259ms, sequenceid=135, compaction requested=true 2024-11-26T10:35:04,080 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 061adf5fb3bc2e9358b6d3d5a6c93c59: 2024-11-26T10:35:04,081 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 061adf5fb3bc2e9358b6d3d5a6c93c59:A, priority=-2147483648, current under compaction store size is 1 2024-11-26T10:35:04,081 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:35:04,081 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:35:04,081 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:35:04,081 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 061adf5fb3bc2e9358b6d3d5a6c93c59:B, priority=-2147483648, current under compaction store size is 2 2024-11-26T10:35:04,081 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:35:04,081 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 061adf5fb3bc2e9358b6d3d5a6c93c59:C, priority=-2147483648, current under compaction store size is 3 2024-11-26T10:35:04,081 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:35:04,081 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101922 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:35:04,081 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:35:04,081 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): 061adf5fb3bc2e9358b6d3d5a6c93c59/A is initiating minor compaction (all files) 2024-11-26T10:35:04,082 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): 061adf5fb3bc2e9358b6d3d5a6c93c59/B is initiating minor compaction (all files) 2024-11-26T10:35:04,082 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 061adf5fb3bc2e9358b6d3d5a6c93c59/A in TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 
2024-11-26T10:35:04,082 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 061adf5fb3bc2e9358b6d3d5a6c93c59/B in TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:04,082 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/872cc1746c354434aebec3b77df6ef76, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/13a4f8a32f6a4dbb812656ef8ab6d702, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/10fde9c06b124c3ca408be76b96ee11b] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp, totalSize=35.5 K 2024-11-26T10:35:04,082 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/75919e567cc54c34a6eeea9b4ef17456, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/ae67b366b4ee4bc38f59f6a6f93b0737, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/694ada3d618745a59ccde96d6178faaf] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp, totalSize=99.5 K 2024-11-26T10:35:04,082 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:04,082 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 
files: [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/75919e567cc54c34a6eeea9b4ef17456, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/ae67b366b4ee4bc38f59f6a6f93b0737, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/694ada3d618745a59ccde96d6178faaf] 2024-11-26T10:35:04,082 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 872cc1746c354434aebec3b77df6ef76, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1732617297839 2024-11-26T10:35:04,082 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 75919e567cc54c34a6eeea9b4ef17456, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1732617297839 2024-11-26T10:35:04,082 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 13a4f8a32f6a4dbb812656ef8ab6d702, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732617300677 2024-11-26T10:35:04,082 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting ae67b366b4ee4bc38f59f6a6f93b0737, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732617300677 2024-11-26T10:35:04,082 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 10fde9c06b124c3ca408be76b96ee11b, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1732617302762 2024-11-26T10:35:04,083 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 694ada3d618745a59ccde96d6178faaf, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1732617302762 2024-11-26T10:35:04,088 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=061adf5fb3bc2e9358b6d3d5a6c93c59] 2024-11-26T10:35:04,090 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 061adf5fb3bc2e9358b6d3d5a6c93c59#B#compaction#318 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:35:04,090 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/cad4fe7ae6d549908e35bf4f51676287 is 50, key is test_row_0/B:col10/1732617302762/Put/seqid=0 2024-11-26T10:35:04,091 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411269ac7946a397a48d08e7bb6cd2996a51d_061adf5fb3bc2e9358b6d3d5a6c93c59 store=[table=TestAcidGuarantees family=A region=061adf5fb3bc2e9358b6d3d5a6c93c59] 2024-11-26T10:35:04,092 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411269ac7946a397a48d08e7bb6cd2996a51d_061adf5fb3bc2e9358b6d3d5a6c93c59, store=[table=TestAcidGuarantees family=A region=061adf5fb3bc2e9358b6d3d5a6c93c59] 2024-11-26T10:35:04,092 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411269ac7946a397a48d08e7bb6cd2996a51d_061adf5fb3bc2e9358b6d3d5a6c93c59 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=061adf5fb3bc2e9358b6d3d5a6c93c59] 2024-11-26T10:35:04,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742197_1373 (size=12409) 2024-11-26T10:35:04,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742198_1374 (size=4469) 2024-11-26T10:35:04,108 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 061adf5fb3bc2e9358b6d3d5a6c93c59#A#compaction#317 average throughput is 1.22 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:35:04,109 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/c514320df286492b836bff111a023697 is 175, key is test_row_0/A:col10/1732617302762/Put/seqid=0 2024-11-26T10:35:04,114 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/cad4fe7ae6d549908e35bf4f51676287 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/cad4fe7ae6d549908e35bf4f51676287 2024-11-26T10:35:04,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742199_1375 (size=31363) 2024-11-26T10:35:04,119 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 061adf5fb3bc2e9358b6d3d5a6c93c59/B of 061adf5fb3bc2e9358b6d3d5a6c93c59 into cad4fe7ae6d549908e35bf4f51676287(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:35:04,119 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 061adf5fb3bc2e9358b6d3d5a6c93c59: 2024-11-26T10:35:04,119 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59., storeName=061adf5fb3bc2e9358b6d3d5a6c93c59/B, priority=13, startTime=1732617304081; duration=0sec 2024-11-26T10:35:04,119 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:35:04,119 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 061adf5fb3bc2e9358b6d3d5a6c93c59:B 2024-11-26T10:35:04,119 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:35:04,121 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:35:04,121 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): 061adf5fb3bc2e9358b6d3d5a6c93c59/C is initiating minor compaction (all files) 2024-11-26T10:35:04,122 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 061adf5fb3bc2e9358b6d3d5a6c93c59/C in TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 
2024-11-26T10:35:04,122 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/d93fc883278541cdb1d803cae1d13895, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/9dc5468999a64de395a7ad570ea7d024, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/8743fcf13e8a46028a993f472a3ccbf3] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp, totalSize=35.5 K 2024-11-26T10:35:04,122 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting d93fc883278541cdb1d803cae1d13895, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1732617297839 2024-11-26T10:35:04,123 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 9dc5468999a64de395a7ad570ea7d024, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732617300677 2024-11-26T10:35:04,124 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 8743fcf13e8a46028a993f472a3ccbf3, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1732617302762 2024-11-26T10:35:04,125 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/c514320df286492b836bff111a023697 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/c514320df286492b836bff111a023697 2024-11-26T10:35:04,131 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 061adf5fb3bc2e9358b6d3d5a6c93c59/A of 061adf5fb3bc2e9358b6d3d5a6c93c59 into c514320df286492b836bff111a023697(size=30.6 K), total size for store is 30.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-26T10:35:04,131 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 061adf5fb3bc2e9358b6d3d5a6c93c59: 2024-11-26T10:35:04,131 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59., storeName=061adf5fb3bc2e9358b6d3d5a6c93c59/A, priority=13, startTime=1732617304080; duration=0sec 2024-11-26T10:35:04,131 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:35:04,131 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 061adf5fb3bc2e9358b6d3d5a6c93c59:A 2024-11-26T10:35:04,133 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 061adf5fb3bc2e9358b6d3d5a6c93c59#C#compaction#319 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:35:04,134 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/2bffd4cee0b04ba298052563cdad66a5 is 50, key is test_row_0/C:col10/1732617302762/Put/seqid=0 2024-11-26T10:35:04,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742200_1376 (size=12409) 2024-11-26T10:35:04,145 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/2bffd4cee0b04ba298052563cdad66a5 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/2bffd4cee0b04ba298052563cdad66a5 2024-11-26T10:35:04,149 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 061adf5fb3bc2e9358b6d3d5a6c93c59/C of 061adf5fb3bc2e9358b6d3d5a6c93c59 into 2bffd4cee0b04ba298052563cdad66a5(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-26T10:35:04,149 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 061adf5fb3bc2e9358b6d3d5a6c93c59: 2024-11-26T10:35:04,149 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59., storeName=061adf5fb3bc2e9358b6d3d5a6c93c59/C, priority=13, startTime=1732617304081; duration=0sec 2024-11-26T10:35:04,149 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:35:04,149 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 061adf5fb3bc2e9358b6d3d5a6c93c59:C 2024-11-26T10:35:04,224 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:04,224 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-26T10:35:04,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:04,225 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2837): Flushing 061adf5fb3bc2e9358b6d3d5a6c93c59 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-26T10:35:04,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 061adf5fb3bc2e9358b6d3d5a6c93c59, store=A 2024-11-26T10:35:04,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:04,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 061adf5fb3bc2e9358b6d3d5a6c93c59, store=B 2024-11-26T10:35:04,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:04,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 061adf5fb3bc2e9358b6d3d5a6c93c59, store=C 2024-11-26T10:35:04,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:04,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126f9ba89bf2d244308995baa6562d321f5_061adf5fb3bc2e9358b6d3d5a6c93c59 is 50, key is test_row_0/A:col10/1732617302918/Put/seqid=0 2024-11-26T10:35:04,233 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742201_1377 (size=12304) 2024-11-26T10:35:04,344 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-26T10:35:04,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:04,661 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126f9ba89bf2d244308995baa6562d321f5_061adf5fb3bc2e9358b6d3d5a6c93c59 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126f9ba89bf2d244308995baa6562d321f5_061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:35:04,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/20789eac5a0540f0965c76a8132961ea, store: [table=TestAcidGuarantees family=A region=061adf5fb3bc2e9358b6d3d5a6c93c59] 2024-11-26T10:35:04,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/20789eac5a0540f0965c76a8132961ea is 175, key is test_row_0/A:col10/1732617302918/Put/seqid=0 2024-11-26T10:35:04,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742202_1378 (size=31105) 2024-11-26T10:35:04,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:35:04,824 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. as already flushing 2024-11-26T10:35:04,841 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:04,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1732617364836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:04,841 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:04,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51382 deadline: 1732617364839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:04,845 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:04,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51374 deadline: 1732617364841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:04,944 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:04,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1732617364942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:04,945 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:04,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51382 deadline: 1732617364942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:04,949 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:04,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51374 deadline: 1732617364946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:04,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-26T10:35:05,049 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:05,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51346 deadline: 1732617365047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:05,051 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:05,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51326 deadline: 1732617365049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:05,067 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=160, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/20789eac5a0540f0965c76a8132961ea 2024-11-26T10:35:05,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/584db1c163964757945fceab85f03c37 is 50, key is test_row_0/B:col10/1732617302918/Put/seqid=0 2024-11-26T10:35:05,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742203_1379 (size=12151) 2024-11-26T10:35:05,148 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:05,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51382 deadline: 1732617365146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:05,150 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:05,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1732617365146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:05,154 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:05,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51374 deadline: 1732617365151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:05,453 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:05,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51382 deadline: 1732617365451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:05,456 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:05,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1732617365453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:05,457 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:05,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51374 deadline: 1732617365455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:05,476 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=160 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/584db1c163964757945fceab85f03c37 2024-11-26T10:35:05,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/2d4a20988aa74942a07765f6ccf167d0 is 50, key is test_row_0/C:col10/1732617302918/Put/seqid=0 2024-11-26T10:35:05,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742204_1380 (size=12151) 2024-11-26T10:35:05,887 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=160 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/2d4a20988aa74942a07765f6ccf167d0 2024-11-26T10:35:05,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/20789eac5a0540f0965c76a8132961ea as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/20789eac5a0540f0965c76a8132961ea 2024-11-26T10:35:05,893 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/20789eac5a0540f0965c76a8132961ea, entries=150, sequenceid=160, filesize=30.4 K 2024-11-26T10:35:05,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/584db1c163964757945fceab85f03c37 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/584db1c163964757945fceab85f03c37 2024-11-26T10:35:05,896 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/584db1c163964757945fceab85f03c37, entries=150, sequenceid=160, filesize=11.9 K 2024-11-26T10:35:05,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/2d4a20988aa74942a07765f6ccf167d0 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/2d4a20988aa74942a07765f6ccf167d0 2024-11-26T10:35:05,900 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/2d4a20988aa74942a07765f6ccf167d0, entries=150, sequenceid=160, filesize=11.9 K 2024-11-26T10:35:05,901 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 061adf5fb3bc2e9358b6d3d5a6c93c59 in 1675ms, sequenceid=160, compaction requested=false 2024-11-26T10:35:05,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2538): Flush status journal for 061adf5fb3bc2e9358b6d3d5a6c93c59: 2024-11-26T10:35:05,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 
2024-11-26T10:35:05,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=111 2024-11-26T10:35:05,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4106): Remote procedure done, pid=111 2024-11-26T10:35:05,902 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=111, resume processing ppid=110 2024-11-26T10:35:05,902 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.0470 sec 2024-11-26T10:35:05,903 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees in 3.0510 sec 2024-11-26T10:35:05,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:35:05,957 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 061adf5fb3bc2e9358b6d3d5a6c93c59 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-26T10:35:05,957 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 061adf5fb3bc2e9358b6d3d5a6c93c59, store=A 2024-11-26T10:35:05,957 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:05,957 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 061adf5fb3bc2e9358b6d3d5a6c93c59, store=B 2024-11-26T10:35:05,957 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:05,958 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 061adf5fb3bc2e9358b6d3d5a6c93c59, store=C 2024-11-26T10:35:05,958 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:05,963 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126d3e109854915495f821261b3ebf7bd9e_061adf5fb3bc2e9358b6d3d5a6c93c59 is 50, key is test_row_0/A:col10/1732617304834/Put/seqid=0 2024-11-26T10:35:05,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742205_1381 (size=14794) 2024-11-26T10:35:06,000 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:06,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51374 deadline: 1732617365993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:06,001 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:06,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1732617365995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:06,005 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:06,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51382 deadline: 1732617366000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:06,107 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:06,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51374 deadline: 1732617366101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:06,107 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:06,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1732617366102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:06,110 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:06,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51382 deadline: 1732617366106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:06,311 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:06,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51374 deadline: 1732617366309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:06,312 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:06,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1732617366309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:06,315 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:06,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51382 deadline: 1732617366311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:06,366 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:06,369 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126d3e109854915495f821261b3ebf7bd9e_061adf5fb3bc2e9358b6d3d5a6c93c59 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126d3e109854915495f821261b3ebf7bd9e_061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:35:06,370 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/59db8a18aa8a41c8a15cd069d295e4a7, store: [table=TestAcidGuarantees family=A region=061adf5fb3bc2e9358b6d3d5a6c93c59] 2024-11-26T10:35:06,371 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/59db8a18aa8a41c8a15cd069d295e4a7 is 175, key is test_row_0/A:col10/1732617304834/Put/seqid=0 2024-11-26T10:35:06,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742206_1382 (size=39749) 2024-11-26T10:35:06,615 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:06,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1732617366613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:06,617 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:06,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51374 deadline: 1732617366613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:06,618 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:06,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51382 deadline: 1732617366615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:06,774 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=175, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/59db8a18aa8a41c8a15cd069d295e4a7 2024-11-26T10:35:06,780 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/6ed2d97d12c7453cb008ff29e3e4361c is 50, key is test_row_0/B:col10/1732617304834/Put/seqid=0 2024-11-26T10:35:06,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742207_1383 (size=12151) 2024-11-26T10:35:06,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-26T10:35:06,957 INFO [Thread-1539 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 110 completed 2024-11-26T10:35:06,958 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-26T10:35:06,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees 2024-11-26T10:35:06,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-26T10:35:06,959 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-26T10:35:06,960 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): 
pid=112, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-26T10:35:06,960 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-26T10:35:07,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-26T10:35:07,060 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:07,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51346 deadline: 1732617367057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:07,061 DEBUG [Thread-1533 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4138 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59., hostname=ccf62758a0a5,45419,1732617185877, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-26T10:35:07,062 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:07,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51326 deadline: 1732617367060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:07,062 DEBUG [Thread-1531 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4143 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59., hostname=ccf62758a0a5,45419,1732617185877, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at 
org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-26T10:35:07,111 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:07,111 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-26T10:35:07,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:07,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. as already flushing 2024-11-26T10:35:07,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:07,112 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:35:07,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:07,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:07,121 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:07,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51374 deadline: 1732617367119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:07,122 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:07,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1732617367119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:07,127 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:07,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51382 deadline: 1732617367123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:07,187 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=175 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/6ed2d97d12c7453cb008ff29e3e4361c 2024-11-26T10:35:07,192 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/78cb651b71e54faeac4768e1d080ea5c is 50, key is test_row_0/C:col10/1732617304834/Put/seqid=0 2024-11-26T10:35:07,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742208_1384 (size=12151) 2024-11-26T10:35:07,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-26T10:35:07,263 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:07,263 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-26T10:35:07,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:07,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. as already flushing 2024-11-26T10:35:07,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 
2024-11-26T10:35:07,264 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:07,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:07,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:07,415 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:07,416 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-26T10:35:07,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:07,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. as already flushing 2024-11-26T10:35:07,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:07,416 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:07,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:07,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:07,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-26T10:35:07,568 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:07,568 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-26T10:35:07,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:07,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. as already flushing 2024-11-26T10:35:07,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:07,568 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:07,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:07,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:07,602 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=175 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/78cb651b71e54faeac4768e1d080ea5c 2024-11-26T10:35:07,605 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/59db8a18aa8a41c8a15cd069d295e4a7 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/59db8a18aa8a41c8a15cd069d295e4a7 2024-11-26T10:35:07,608 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/59db8a18aa8a41c8a15cd069d295e4a7, entries=200, sequenceid=175, filesize=38.8 K 2024-11-26T10:35:07,609 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/6ed2d97d12c7453cb008ff29e3e4361c as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/6ed2d97d12c7453cb008ff29e3e4361c 2024-11-26T10:35:07,613 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/6ed2d97d12c7453cb008ff29e3e4361c, entries=150, 
sequenceid=175, filesize=11.9 K 2024-11-26T10:35:07,613 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/78cb651b71e54faeac4768e1d080ea5c as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/78cb651b71e54faeac4768e1d080ea5c 2024-11-26T10:35:07,616 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/78cb651b71e54faeac4768e1d080ea5c, entries=150, sequenceid=175, filesize=11.9 K 2024-11-26T10:35:07,617 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 061adf5fb3bc2e9358b6d3d5a6c93c59 in 1660ms, sequenceid=175, compaction requested=true 2024-11-26T10:35:07,617 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 061adf5fb3bc2e9358b6d3d5a6c93c59: 2024-11-26T10:35:07,617 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 061adf5fb3bc2e9358b6d3d5a6c93c59:A, priority=-2147483648, current under compaction store size is 1 2024-11-26T10:35:07,617 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:35:07,617 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 061adf5fb3bc2e9358b6d3d5a6c93c59:B, priority=-2147483648, current under compaction store size is 2 2024-11-26T10:35:07,617 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:35:07,617 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:35:07,617 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 061adf5fb3bc2e9358b6d3d5a6c93c59:C, priority=-2147483648, current under compaction store size is 3 2024-11-26T10:35:07,617 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:35:07,617 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:35:07,618 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36711 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:35:07,618 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102217 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:35:07,618 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] 
regionserver.HStore(1540): 061adf5fb3bc2e9358b6d3d5a6c93c59/A is initiating minor compaction (all files) 2024-11-26T10:35:07,618 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): 061adf5fb3bc2e9358b6d3d5a6c93c59/B is initiating minor compaction (all files) 2024-11-26T10:35:07,618 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 061adf5fb3bc2e9358b6d3d5a6c93c59/A in TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:07,618 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 061adf5fb3bc2e9358b6d3d5a6c93c59/B in TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:07,618 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/cad4fe7ae6d549908e35bf4f51676287, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/584db1c163964757945fceab85f03c37, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/6ed2d97d12c7453cb008ff29e3e4361c] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp, totalSize=35.9 K 2024-11-26T10:35:07,618 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/c514320df286492b836bff111a023697, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/20789eac5a0540f0965c76a8132961ea, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/59db8a18aa8a41c8a15cd069d295e4a7] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp, totalSize=99.8 K 2024-11-26T10:35:07,618 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:07,618 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 
files: [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/c514320df286492b836bff111a023697, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/20789eac5a0540f0965c76a8132961ea, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/59db8a18aa8a41c8a15cd069d295e4a7] 2024-11-26T10:35:07,619 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting c514320df286492b836bff111a023697, keycount=150, bloomtype=ROW, size=30.6 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1732617302762 2024-11-26T10:35:07,619 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting cad4fe7ae6d549908e35bf4f51676287, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1732617302762 2024-11-26T10:35:07,619 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 584db1c163964757945fceab85f03c37, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1732617302908 2024-11-26T10:35:07,619 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 20789eac5a0540f0965c76a8132961ea, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1732617302908 2024-11-26T10:35:07,619 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 59db8a18aa8a41c8a15cd069d295e4a7, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1732617304832 2024-11-26T10:35:07,619 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 6ed2d97d12c7453cb008ff29e3e4361c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1732617304834 2024-11-26T10:35:07,624 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=061adf5fb3bc2e9358b6d3d5a6c93c59] 2024-11-26T10:35:07,625 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 061adf5fb3bc2e9358b6d3d5a6c93c59#B#compaction#326 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:35:07,625 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/4580ce431b3e4161afb5ce22d597c36a is 50, key is test_row_0/B:col10/1732617304834/Put/seqid=0 2024-11-26T10:35:07,627 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411268d3a294e327542139c70d3afd45a54ac_061adf5fb3bc2e9358b6d3d5a6c93c59 store=[table=TestAcidGuarantees family=A region=061adf5fb3bc2e9358b6d3d5a6c93c59] 2024-11-26T10:35:07,629 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411268d3a294e327542139c70d3afd45a54ac_061adf5fb3bc2e9358b6d3d5a6c93c59, store=[table=TestAcidGuarantees family=A region=061adf5fb3bc2e9358b6d3d5a6c93c59] 2024-11-26T10:35:07,629 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411268d3a294e327542139c70d3afd45a54ac_061adf5fb3bc2e9358b6d3d5a6c93c59 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=061adf5fb3bc2e9358b6d3d5a6c93c59] 2024-11-26T10:35:07,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742210_1386 (size=12561) 2024-11-26T10:35:07,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742209_1385 (size=4469) 2024-11-26T10:35:07,720 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:07,720 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-26T10:35:07,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 
2024-11-26T10:35:07,720 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2837): Flushing 061adf5fb3bc2e9358b6d3d5a6c93c59 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-26T10:35:07,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 061adf5fb3bc2e9358b6d3d5a6c93c59, store=A 2024-11-26T10:35:07,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:07,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 061adf5fb3bc2e9358b6d3d5a6c93c59, store=B 2024-11-26T10:35:07,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:07,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 061adf5fb3bc2e9358b6d3d5a6c93c59, store=C 2024-11-26T10:35:07,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:07,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126caa84bfe939d40adae4f5c303f19a517_061adf5fb3bc2e9358b6d3d5a6c93c59 is 50, key is test_row_0/A:col10/1732617305994/Put/seqid=0 2024-11-26T10:35:07,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742211_1387 (size=12304) 2024-11-26T10:35:07,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:07,734 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126caa84bfe939d40adae4f5c303f19a517_061adf5fb3bc2e9358b6d3d5a6c93c59 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126caa84bfe939d40adae4f5c303f19a517_061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:35:07,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/e2b8b62ffc79457e9c83dc38ea9017cb, store: [table=TestAcidGuarantees family=A region=061adf5fb3bc2e9358b6d3d5a6c93c59] 2024-11-26T10:35:07,735 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/e2b8b62ffc79457e9c83dc38ea9017cb is 175, key is test_row_0/A:col10/1732617305994/Put/seqid=0 2024-11-26T10:35:07,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742212_1388 (size=31105) 2024-11-26T10:35:08,040 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 061adf5fb3bc2e9358b6d3d5a6c93c59#A#compaction#327 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:35:08,040 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/83baf926b1554d3ea02c81ea1702883c is 175, key is test_row_0/A:col10/1732617304834/Put/seqid=0 2024-11-26T10:35:08,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742213_1389 (size=31515) 2024-11-26T10:35:08,044 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/4580ce431b3e4161afb5ce22d597c36a as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/4580ce431b3e4161afb5ce22d597c36a 2024-11-26T10:35:08,048 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 061adf5fb3bc2e9358b6d3d5a6c93c59/B of 061adf5fb3bc2e9358b6d3d5a6c93c59 into 4580ce431b3e4161afb5ce22d597c36a(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-26T10:35:08,048 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 061adf5fb3bc2e9358b6d3d5a6c93c59: 2024-11-26T10:35:08,048 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59., storeName=061adf5fb3bc2e9358b6d3d5a6c93c59/B, priority=13, startTime=1732617307617; duration=0sec 2024-11-26T10:35:08,048 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:35:08,048 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 061adf5fb3bc2e9358b6d3d5a6c93c59:B 2024-11-26T10:35:08,048 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:35:08,049 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36711 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:35:08,049 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): 061adf5fb3bc2e9358b6d3d5a6c93c59/C is initiating minor compaction (all files) 2024-11-26T10:35:08,049 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 061adf5fb3bc2e9358b6d3d5a6c93c59/C in TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:08,049 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/2bffd4cee0b04ba298052563cdad66a5, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/2d4a20988aa74942a07765f6ccf167d0, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/78cb651b71e54faeac4768e1d080ea5c] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp, totalSize=35.9 K 2024-11-26T10:35:08,049 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 2bffd4cee0b04ba298052563cdad66a5, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1732617302762 2024-11-26T10:35:08,049 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 2d4a20988aa74942a07765f6ccf167d0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1732617302908 2024-11-26T10:35:08,050 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 78cb651b71e54faeac4768e1d080ea5c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1732617304834 2024-11-26T10:35:08,055 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
061adf5fb3bc2e9358b6d3d5a6c93c59#C#compaction#329 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:35:08,056 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/0094adf87cdb431ea2045cff6294bf8c is 50, key is test_row_0/C:col10/1732617304834/Put/seqid=0 2024-11-26T10:35:08,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742214_1390 (size=12561) 2024-11-26T10:35:08,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-26T10:35:08,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:35:08,133 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. as already flushing 2024-11-26T10:35:08,138 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=196, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/e2b8b62ffc79457e9c83dc38ea9017cb 2024-11-26T10:35:08,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/3eb7959029924a489cd223d328501858 is 50, key is test_row_0/B:col10/1732617305994/Put/seqid=0 2024-11-26T10:35:08,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742215_1391 (size=12151) 2024-11-26T10:35:08,159 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:08,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1732617368152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:08,164 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:08,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51374 deadline: 1732617368157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:08,167 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:08,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51382 deadline: 1732617368159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:08,265 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:08,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1732617368260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:08,269 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:08,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51374 deadline: 1732617368265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:08,270 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:08,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51382 deadline: 1732617368268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:08,448 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/83baf926b1554d3ea02c81ea1702883c as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/83baf926b1554d3ea02c81ea1702883c 2024-11-26T10:35:08,451 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 061adf5fb3bc2e9358b6d3d5a6c93c59/A of 061adf5fb3bc2e9358b6d3d5a6c93c59 into 83baf926b1554d3ea02c81ea1702883c(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:35:08,451 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 061adf5fb3bc2e9358b6d3d5a6c93c59: 2024-11-26T10:35:08,451 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59., storeName=061adf5fb3bc2e9358b6d3d5a6c93c59/A, priority=13, startTime=1732617307617; duration=0sec 2024-11-26T10:35:08,451 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:35:08,451 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 061adf5fb3bc2e9358b6d3d5a6c93c59:A 2024-11-26T10:35:08,462 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/0094adf87cdb431ea2045cff6294bf8c as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/0094adf87cdb431ea2045cff6294bf8c 2024-11-26T10:35:08,466 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 061adf5fb3bc2e9358b6d3d5a6c93c59/C of 061adf5fb3bc2e9358b6d3d5a6c93c59 into 0094adf87cdb431ea2045cff6294bf8c(size=12.3 K), total size for store is 12.3 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:35:08,466 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 061adf5fb3bc2e9358b6d3d5a6c93c59: 2024-11-26T10:35:08,466 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59., storeName=061adf5fb3bc2e9358b6d3d5a6c93c59/C, priority=13, startTime=1732617307617; duration=0sec 2024-11-26T10:35:08,466 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:35:08,466 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 061adf5fb3bc2e9358b6d3d5a6c93c59:C 2024-11-26T10:35:08,470 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:08,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1732617368466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:08,474 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:08,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51374 deadline: 1732617368471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:08,474 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:08,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51382 deadline: 1732617368472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:08,548 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/3eb7959029924a489cd223d328501858 2024-11-26T10:35:08,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/0abde2d6b5144f2a83a6545f849f8341 is 50, key is test_row_0/C:col10/1732617305994/Put/seqid=0 2024-11-26T10:35:08,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742216_1392 (size=12151) 2024-11-26T10:35:08,711 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/ccf62758a0a5:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/b62599f6950b463491629d5adb37106d, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/cbd026c361de4f5880e3236435864ae0, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/bff4a61f3aa1462c8fef7e60a1579f7f, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/40a2119b2b5f431fb3304d72b53cd2a1, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/35af9bfb4f8f40008369bf8b3345bfe0, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/75919e567cc54c34a6eeea9b4ef17456, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/9e77a53747b04f6e936732cb2781a10c, 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/ae67b366b4ee4bc38f59f6a6f93b0737, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/694ada3d618745a59ccde96d6178faaf, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/c514320df286492b836bff111a023697, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/20789eac5a0540f0965c76a8132961ea, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/59db8a18aa8a41c8a15cd069d295e4a7] to archive 2024-11-26T10:35:08,712 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/ccf62758a0a5:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-26T10:35:08,713 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/ccf62758a0a5:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/b62599f6950b463491629d5adb37106d to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/b62599f6950b463491629d5adb37106d 2024-11-26T10:35:08,714 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/ccf62758a0a5:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/cbd026c361de4f5880e3236435864ae0 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/cbd026c361de4f5880e3236435864ae0 2024-11-26T10:35:08,715 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/ccf62758a0a5:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/bff4a61f3aa1462c8fef7e60a1579f7f to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/bff4a61f3aa1462c8fef7e60a1579f7f 2024-11-26T10:35:08,716 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/ccf62758a0a5:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/40a2119b2b5f431fb3304d72b53cd2a1 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/40a2119b2b5f431fb3304d72b53cd2a1 2024-11-26T10:35:08,717 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/ccf62758a0a5:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/35af9bfb4f8f40008369bf8b3345bfe0 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/35af9bfb4f8f40008369bf8b3345bfe0 2024-11-26T10:35:08,717 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/ccf62758a0a5:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/75919e567cc54c34a6eeea9b4ef17456 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/75919e567cc54c34a6eeea9b4ef17456 2024-11-26T10:35:08,718 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/ccf62758a0a5:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/9e77a53747b04f6e936732cb2781a10c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/9e77a53747b04f6e936732cb2781a10c 2024-11-26T10:35:08,719 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/ccf62758a0a5:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/ae67b366b4ee4bc38f59f6a6f93b0737 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/ae67b366b4ee4bc38f59f6a6f93b0737 2024-11-26T10:35:08,719 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/ccf62758a0a5:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/694ada3d618745a59ccde96d6178faaf to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/694ada3d618745a59ccde96d6178faaf 2024-11-26T10:35:08,720 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/ccf62758a0a5:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/c514320df286492b836bff111a023697 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/c514320df286492b836bff111a023697 2024-11-26T10:35:08,721 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/ccf62758a0a5:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/20789eac5a0540f0965c76a8132961ea to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/20789eac5a0540f0965c76a8132961ea 2024-11-26T10:35:08,722 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/ccf62758a0a5:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/59db8a18aa8a41c8a15cd069d295e4a7 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/59db8a18aa8a41c8a15cd069d295e4a7 2024-11-26T10:35:08,723 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/ccf62758a0a5:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/17092df6d25449c9b3ab168a2aba3686, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/5dbbe565ff474de69031891656a36d87, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/05d36e347e7644e9a1b2b6c3dfda78ea, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/02a1a5cc4aa24081a78698339bdc901e, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/2c796c6bbed440c896a0ddb406576c5f, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/872cc1746c354434aebec3b77df6ef76, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/ba51816472864df3ad6c092e6a88a59e, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/13a4f8a32f6a4dbb812656ef8ab6d702, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/cad4fe7ae6d549908e35bf4f51676287, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/10fde9c06b124c3ca408be76b96ee11b, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/584db1c163964757945fceab85f03c37, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/6ed2d97d12c7453cb008ff29e3e4361c] to archive 2024-11-26T10:35:08,724 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/ccf62758a0a5:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-26T10:35:08,725 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/ccf62758a0a5:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/17092df6d25449c9b3ab168a2aba3686 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/17092df6d25449c9b3ab168a2aba3686 2024-11-26T10:35:08,726 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/ccf62758a0a5:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/5dbbe565ff474de69031891656a36d87 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/5dbbe565ff474de69031891656a36d87 2024-11-26T10:35:08,727 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/ccf62758a0a5:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/05d36e347e7644e9a1b2b6c3dfda78ea to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/05d36e347e7644e9a1b2b6c3dfda78ea 2024-11-26T10:35:08,727 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/ccf62758a0a5:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/02a1a5cc4aa24081a78698339bdc901e to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/02a1a5cc4aa24081a78698339bdc901e 2024-11-26T10:35:08,728 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/ccf62758a0a5:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/2c796c6bbed440c896a0ddb406576c5f to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/2c796c6bbed440c896a0ddb406576c5f 2024-11-26T10:35:08,729 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/ccf62758a0a5:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/872cc1746c354434aebec3b77df6ef76 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/872cc1746c354434aebec3b77df6ef76 2024-11-26T10:35:08,729 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/ccf62758a0a5:0-0 
{event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/ba51816472864df3ad6c092e6a88a59e to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/ba51816472864df3ad6c092e6a88a59e 2024-11-26T10:35:08,730 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/ccf62758a0a5:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/13a4f8a32f6a4dbb812656ef8ab6d702 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/13a4f8a32f6a4dbb812656ef8ab6d702 2024-11-26T10:35:08,731 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/ccf62758a0a5:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/cad4fe7ae6d549908e35bf4f51676287 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/cad4fe7ae6d549908e35bf4f51676287 2024-11-26T10:35:08,732 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/ccf62758a0a5:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/10fde9c06b124c3ca408be76b96ee11b to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/10fde9c06b124c3ca408be76b96ee11b 2024-11-26T10:35:08,732 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/ccf62758a0a5:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/584db1c163964757945fceab85f03c37 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/584db1c163964757945fceab85f03c37 2024-11-26T10:35:08,733 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/ccf62758a0a5:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/6ed2d97d12c7453cb008ff29e3e4361c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/6ed2d97d12c7453cb008ff29e3e4361c 2024-11-26T10:35:08,734 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/ccf62758a0a5:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/0d3987e8aba3495a93b44b58f7e98d05, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/1a9c2e8ea84542fa8d6718e2a07015f9, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/2a30f70605934fdd9352d0b39dd54931, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/14aa4084f6ff4361bfa0113023ac3705, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/e8e7f258e39e41348242f3c76ede2e4e, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/d93fc883278541cdb1d803cae1d13895, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/191c95bbc6e8418f89c1317b94c5e147, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/9dc5468999a64de395a7ad570ea7d024, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/2bffd4cee0b04ba298052563cdad66a5, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/8743fcf13e8a46028a993f472a3ccbf3, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/2d4a20988aa74942a07765f6ccf167d0, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/78cb651b71e54faeac4768e1d080ea5c] to archive 2024-11-26T10:35:08,735 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/ccf62758a0a5:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-26T10:35:08,736 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/ccf62758a0a5:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/0d3987e8aba3495a93b44b58f7e98d05 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/0d3987e8aba3495a93b44b58f7e98d05 2024-11-26T10:35:08,738 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/ccf62758a0a5:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/1a9c2e8ea84542fa8d6718e2a07015f9 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/1a9c2e8ea84542fa8d6718e2a07015f9 2024-11-26T10:35:08,739 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/ccf62758a0a5:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/2a30f70605934fdd9352d0b39dd54931 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/2a30f70605934fdd9352d0b39dd54931 2024-11-26T10:35:08,740 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/ccf62758a0a5:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/14aa4084f6ff4361bfa0113023ac3705 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/14aa4084f6ff4361bfa0113023ac3705 2024-11-26T10:35:08,740 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/ccf62758a0a5:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/e8e7f258e39e41348242f3c76ede2e4e to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/e8e7f258e39e41348242f3c76ede2e4e 2024-11-26T10:35:08,741 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/ccf62758a0a5:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/d93fc883278541cdb1d803cae1d13895 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/d93fc883278541cdb1d803cae1d13895 2024-11-26T10:35:08,743 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/ccf62758a0a5:0-0 
{event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/191c95bbc6e8418f89c1317b94c5e147 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/191c95bbc6e8418f89c1317b94c5e147 2024-11-26T10:35:08,743 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/ccf62758a0a5:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/9dc5468999a64de395a7ad570ea7d024 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/9dc5468999a64de395a7ad570ea7d024 2024-11-26T10:35:08,744 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/ccf62758a0a5:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/2bffd4cee0b04ba298052563cdad66a5 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/2bffd4cee0b04ba298052563cdad66a5 2024-11-26T10:35:08,745 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/ccf62758a0a5:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/8743fcf13e8a46028a993f472a3ccbf3 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/8743fcf13e8a46028a993f472a3ccbf3 2024-11-26T10:35:08,745 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/ccf62758a0a5:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/2d4a20988aa74942a07765f6ccf167d0 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/2d4a20988aa74942a07765f6ccf167d0 2024-11-26T10:35:08,747 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/ccf62758a0a5:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/78cb651b71e54faeac4768e1d080ea5c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/78cb651b71e54faeac4768e1d080ea5c 2024-11-26T10:35:08,776 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:08,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1732617368773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:08,780 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:08,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51374 deadline: 1732617368776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:08,783 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:08,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51382 deadline: 1732617368777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:08,957 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/0abde2d6b5144f2a83a6545f849f8341 2024-11-26T10:35:08,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/e2b8b62ffc79457e9c83dc38ea9017cb as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/e2b8b62ffc79457e9c83dc38ea9017cb 2024-11-26T10:35:08,964 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/e2b8b62ffc79457e9c83dc38ea9017cb, entries=150, sequenceid=196, filesize=30.4 K 2024-11-26T10:35:08,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/3eb7959029924a489cd223d328501858 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/3eb7959029924a489cd223d328501858 2024-11-26T10:35:08,968 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/3eb7959029924a489cd223d328501858, entries=150, sequenceid=196, filesize=11.9 K 2024-11-26T10:35:08,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/0abde2d6b5144f2a83a6545f849f8341 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/0abde2d6b5144f2a83a6545f849f8341 2024-11-26T10:35:08,971 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/0abde2d6b5144f2a83a6545f849f8341, entries=150, sequenceid=196, filesize=11.9 K 2024-11-26T10:35:08,972 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 061adf5fb3bc2e9358b6d3d5a6c93c59 in 1252ms, sequenceid=196, compaction requested=false 2024-11-26T10:35:08,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2538): Flush status journal for 061adf5fb3bc2e9358b6d3d5a6c93c59: 2024-11-26T10:35:08,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:08,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=113 2024-11-26T10:35:08,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4106): Remote procedure done, pid=113 2024-11-26T10:35:08,974 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=112 2024-11-26T10:35:08,974 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0130 sec 2024-11-26T10:35:08,975 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees in 2.0160 sec 2024-11-26T10:35:09,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-26T10:35:09,062 INFO [Thread-1539 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 112 completed 2024-11-26T10:35:09,063 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-26T10:35:09,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees 2024-11-26T10:35:09,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-26T10:35:09,064 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=114, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-26T10:35:09,065 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-26T10:35:09,065 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-26T10:35:09,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-26T10:35:09,216 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:09,216 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-26T10:35:09,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:09,216 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2837): Flushing 061adf5fb3bc2e9358b6d3d5a6c93c59 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-26T10:35:09,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 061adf5fb3bc2e9358b6d3d5a6c93c59, store=A 2024-11-26T10:35:09,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:09,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 061adf5fb3bc2e9358b6d3d5a6c93c59, store=B 2024-11-26T10:35:09,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:09,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 061adf5fb3bc2e9358b6d3d5a6c93c59, store=C 2024-11-26T10:35:09,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:09,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411261074ba1f6eb24500af1e6ccdb5b0b40c_061adf5fb3bc2e9358b6d3d5a6c93c59 is 50, key is test_row_0/A:col10/1732617308150/Put/seqid=0 2024-11-26T10:35:09,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:41261 is added to blk_1073742217_1393 (size=12304) 2024-11-26T10:35:09,283 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. as already flushing 2024-11-26T10:35:09,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:35:09,316 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:09,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1732617369313, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:09,318 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:09,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51374 deadline: 1732617369314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:09,319 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:09,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51382 deadline: 1732617369314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:09,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-26T10:35:09,424 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:09,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1732617369417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:09,424 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:09,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51374 deadline: 1732617369418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:09,424 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:09,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51382 deadline: 1732617369420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:09,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:09,627 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:09,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1732617369625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:09,628 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:09,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51374 deadline: 1732617369626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:09,628 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:09,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51382 deadline: 1732617369626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:09,629 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411261074ba1f6eb24500af1e6ccdb5b0b40c_061adf5fb3bc2e9358b6d3d5a6c93c59 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411261074ba1f6eb24500af1e6ccdb5b0b40c_061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:35:09,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/d19df45a85154e848cbd06025cc98967, store: [table=TestAcidGuarantees family=A region=061adf5fb3bc2e9358b6d3d5a6c93c59] 2024-11-26T10:35:09,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/d19df45a85154e848cbd06025cc98967 is 175, key is test_row_0/A:col10/1732617308150/Put/seqid=0 2024-11-26T10:35:09,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742218_1394 (size=31105) 2024-11-26T10:35:09,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-26T10:35:09,931 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:09,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51382 deadline: 1732617369929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:09,935 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:09,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1732617369930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:09,935 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:09,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51374 deadline: 1732617369930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:10,053 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=214, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/d19df45a85154e848cbd06025cc98967 2024-11-26T10:35:10,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/712e2d2aca9f48a8a2abb42fb1c6146d is 50, key is test_row_0/B:col10/1732617308150/Put/seqid=0 2024-11-26T10:35:10,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742219_1395 (size=12151) 2024-11-26T10:35:10,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-26T10:35:10,440 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:10,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51382 deadline: 1732617370436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:10,440 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:10,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51374 deadline: 1732617370436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:10,440 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:10,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1732617370438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:10,465 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=214 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/712e2d2aca9f48a8a2abb42fb1c6146d 2024-11-26T10:35:10,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/daa46a9997404c09af9fc895e6906e16 is 50, key is test_row_0/C:col10/1732617308150/Put/seqid=0 2024-11-26T10:35:10,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742220_1396 (size=12151) 2024-11-26T10:35:10,477 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=214 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/daa46a9997404c09af9fc895e6906e16 2024-11-26T10:35:10,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/d19df45a85154e848cbd06025cc98967 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/d19df45a85154e848cbd06025cc98967 2024-11-26T10:35:10,482 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/d19df45a85154e848cbd06025cc98967, entries=150, sequenceid=214, filesize=30.4 K 2024-11-26T10:35:10,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/712e2d2aca9f48a8a2abb42fb1c6146d as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/712e2d2aca9f48a8a2abb42fb1c6146d 2024-11-26T10:35:10,486 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/712e2d2aca9f48a8a2abb42fb1c6146d, entries=150, sequenceid=214, filesize=11.9 K 2024-11-26T10:35:10,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/daa46a9997404c09af9fc895e6906e16 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/daa46a9997404c09af9fc895e6906e16 2024-11-26T10:35:10,489 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/daa46a9997404c09af9fc895e6906e16, entries=150, sequenceid=214, filesize=11.9 K 2024-11-26T10:35:10,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.StoreScanner(1000): StoreScanner already closing. There is no need to updateReaders 2024-11-26T10:35:10,490 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 061adf5fb3bc2e9358b6d3d5a6c93c59 in 1274ms, sequenceid=214, compaction requested=true 2024-11-26T10:35:10,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2538): Flush status journal for 061adf5fb3bc2e9358b6d3d5a6c93c59: 2024-11-26T10:35:10,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 
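The burst of RegionTooBusyException entries above is the region server refusing Mutate calls once the region's memstore hits its blocking threshold (reported here as 512.0 K); writes resume after the flush that finishes in the entry above. In stock HBase that threshold is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The following Java sketch shows how such a small limit could be configured in test setup code; the 128 KB flush size and multiplier of 4 are illustrative assumptions consistent with the 512 K figure, not values read out of this run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitConfig {
  public static Configuration smallMemstoreConf() {
    Configuration conf = HBaseConfiguration.create();
    // Flush a region's memstore once it holds ~128 KB (illustrative; the default is 128 MB).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // Block new writes once the memstore exceeds flush.size * multiplier,
    // i.e. 4 * 128 KB = 512 KB -- the "Over memstore limit=512.0 K" seen in the entries above.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    return conf;
  }
}

Keeping the blocking multiplier low forces the write-blocking path to trigger quickly under load, which is presumably why the rejections appear here in rapid succession while flushes are still in flight.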
2024-11-26T10:35:10,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=115 2024-11-26T10:35:10,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4106): Remote procedure done, pid=115 2024-11-26T10:35:10,492 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=115, resume processing ppid=114 2024-11-26T10:35:10,492 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, ppid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4260 sec 2024-11-26T10:35:10,493 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees in 1.4290 sec 2024-11-26T10:35:11,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:35:11,074 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 061adf5fb3bc2e9358b6d3d5a6c93c59 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-26T10:35:11,074 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 061adf5fb3bc2e9358b6d3d5a6c93c59, store=A 2024-11-26T10:35:11,074 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:11,074 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 061adf5fb3bc2e9358b6d3d5a6c93c59, store=B 2024-11-26T10:35:11,074 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:11,074 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 061adf5fb3bc2e9358b6d3d5a6c93c59, store=C 2024-11-26T10:35:11,075 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:11,080 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411268dea21e3daec4d82836cfc8cfefc11aa_061adf5fb3bc2e9358b6d3d5a6c93c59 is 50, key is test_row_0/A:col10/1732617309313/Put/seqid=0 2024-11-26T10:35:11,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742221_1397 (size=14794) 2024-11-26T10:35:11,118 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:11,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51346 deadline: 1732617371111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:11,124 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:11,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51326 deadline: 1732617371115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:11,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-26T10:35:11,168 INFO [Thread-1539 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 114 completed 2024-11-26T10:35:11,169 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-26T10:35:11,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees 2024-11-26T10:35:11,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-26T10:35:11,170 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-26T10:35:11,170 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-26T10:35:11,171 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-26T10:35:11,222 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:11,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51346 deadline: 1732617371219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:11,228 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:11,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51326 deadline: 1732617371225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:11,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-26T10:35:11,322 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:11,322 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-26T10:35:11,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:11,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. as already flushing 2024-11-26T10:35:11,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:11,322 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
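The ipc.CallRunner entries show each client Mutate being bounced with RegionTooBusyException until the memstore drains. The HBase client normally retries this exception internally (subject to hbase.client.retries.number and hbase.client.pause) before surfacing it, so the explicit loop below is only a sketch of the equivalent behavior; the table name, row, and column family come from the log, while the retry cap and backoff are illustrative assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPut {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      int attempts = 0;
      while (true) {
        try {
          table.put(put);            // may be rejected while the region's memstore is over its limit
          break;
        } catch (RegionTooBusyException e) {
          if (++attempts >= 5) {     // illustrative cap; real clients tune hbase.client.retries.number
            throw e;
          }
          Thread.sleep(200L * attempts);  // simple linear backoff (illustrative)
        }
      }
    }
  }
}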
2024-11-26T10:35:11,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:11,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:11,430 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:11,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51346 deadline: 1732617371423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:11,435 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:11,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51326 deadline: 1732617371430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:11,442 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:11,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51374 deadline: 1732617371441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:11,452 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:11,452 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:11,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51382 deadline: 1732617371446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:11,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1732617371445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:11,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-26T10:35:11,474 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:11,474 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-26T10:35:11,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:11,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. as already flushing 2024-11-26T10:35:11,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:11,475 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:11,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:11,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:11,483 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:11,486 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411268dea21e3daec4d82836cfc8cfefc11aa_061adf5fb3bc2e9358b6d3d5a6c93c59 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411268dea21e3daec4d82836cfc8cfefc11aa_061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:35:11,487 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/af40567e515c4b6196ea03422719eff1, store: [table=TestAcidGuarantees family=A region=061adf5fb3bc2e9358b6d3d5a6c93c59] 2024-11-26T10:35:11,487 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/af40567e515c4b6196ea03422719eff1 is 175, key is test_row_0/A:col10/1732617309313/Put/seqid=0 2024-11-26T10:35:11,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742222_1398 (size=39749) 2024-11-26T10:35:11,626 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:11,626 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-26T10:35:11,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:11,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. as already flushing 2024-11-26T10:35:11,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 
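The pid=117 failures above come from a master-driven FlushRegionProcedure arriving while MemStoreFlusher.0 is already flushing the same region: FlushRegionCallable reports "Unable to complete flush" and the master keeps re-dispatching the procedure until the in-progress flush completes. From client code the corresponding request is Admin.flush, which can likewise fail and be retried while a flush is in flight; the sketch below is an illustrative assumption about how a caller might handle that, not the test's own code.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushWithRetry {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      for (int attempt = 1; ; attempt++) {
        try {
          admin.flush(table);     // asks the master to flush every region of the table
          break;
        } catch (IOException e) {
          // A flush already running on the region can make the request fail, as in the log above.
          if (attempt >= 3) {     // illustrative retry cap
            throw e;
          }
          Thread.sleep(1000L);
        }
      }
    }
  }
}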
2024-11-26T10:35:11,627 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:11,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:11,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:11,736 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:11,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51346 deadline: 1732617371732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:11,742 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:11,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51326 deadline: 1732617371738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:11,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-26T10:35:11,779 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:11,779 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-26T10:35:11,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:11,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. as already flushing 2024-11-26T10:35:11,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:11,779 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:35:11,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:11,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:11,891 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=236, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/af40567e515c4b6196ea03422719eff1 2024-11-26T10:35:11,895 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/5ea83f0029e74087be66dc506fab9431 is 50, key is test_row_0/B:col10/1732617309313/Put/seqid=0 2024-11-26T10:35:11,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742223_1399 (size=12151) 2024-11-26T10:35:11,931 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:11,931 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-26T10:35:11,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:11,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. as already flushing 2024-11-26T10:35:11,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:11,931 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:35:11,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:11,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:12,083 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:12,083 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-26T10:35:12,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:12,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. as already flushing 2024-11-26T10:35:12,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:12,084 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:12,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:12,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:12,235 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:12,236 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-26T10:35:12,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 
2024-11-26T10:35:12,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. as already flushing 2024-11-26T10:35:12,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:12,236 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:12,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:35:12,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:12,241 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:12,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51346 deadline: 1732617372240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:12,251 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:12,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51326 deadline: 1732617372246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:12,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-26T10:35:12,299 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/5ea83f0029e74087be66dc506fab9431 2024-11-26T10:35:12,304 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/19f5f034a6d947e791257e2e46dc9818 is 50, key is test_row_0/C:col10/1732617309313/Put/seqid=0 2024-11-26T10:35:12,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742224_1400 (size=12151) 2024-11-26T10:35:12,388 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:12,388 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-26T10:35:12,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:12,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. as already flushing 2024-11-26T10:35:12,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 
2024-11-26T10:35:12,389 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:12,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:12,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:12,540 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:12,541 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-26T10:35:12,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:12,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. as already flushing 2024-11-26T10:35:12,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:12,541 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:12,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:12,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:12,693 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:12,693 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-26T10:35:12,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:12,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. as already flushing 2024-11-26T10:35:12,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:12,693 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:12,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:12,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:12,707 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/19f5f034a6d947e791257e2e46dc9818 2024-11-26T10:35:12,711 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/af40567e515c4b6196ea03422719eff1 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/af40567e515c4b6196ea03422719eff1 2024-11-26T10:35:12,713 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/af40567e515c4b6196ea03422719eff1, entries=200, sequenceid=236, filesize=38.8 K 2024-11-26T10:35:12,714 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/5ea83f0029e74087be66dc506fab9431 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/5ea83f0029e74087be66dc506fab9431 2024-11-26T10:35:12,716 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/5ea83f0029e74087be66dc506fab9431, entries=150, 
sequenceid=236, filesize=11.9 K 2024-11-26T10:35:12,717 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/19f5f034a6d947e791257e2e46dc9818 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/19f5f034a6d947e791257e2e46dc9818 2024-11-26T10:35:12,719 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/19f5f034a6d947e791257e2e46dc9818, entries=150, sequenceid=236, filesize=11.9 K 2024-11-26T10:35:12,720 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 061adf5fb3bc2e9358b6d3d5a6c93c59 in 1646ms, sequenceid=236, compaction requested=true 2024-11-26T10:35:12,720 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 061adf5fb3bc2e9358b6d3d5a6c93c59: 2024-11-26T10:35:12,720 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 061adf5fb3bc2e9358b6d3d5a6c93c59:A, priority=-2147483648, current under compaction store size is 1 2024-11-26T10:35:12,720 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:35:12,720 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 061adf5fb3bc2e9358b6d3d5a6c93c59:B, priority=-2147483648, current under compaction store size is 2 2024-11-26T10:35:12,720 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-26T10:35:12,720 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:35:12,720 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-26T10:35:12,720 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 061adf5fb3bc2e9358b6d3d5a6c93c59:C, priority=-2147483648, current under compaction store size is 3 2024-11-26T10:35:12,720 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:35:12,721 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 133474 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-26T10:35:12,721 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-26T10:35:12,721 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] 
regionserver.HStore(1540): 061adf5fb3bc2e9358b6d3d5a6c93c59/B is initiating minor compaction (all files) 2024-11-26T10:35:12,721 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): 061adf5fb3bc2e9358b6d3d5a6c93c59/A is initiating minor compaction (all files) 2024-11-26T10:35:12,721 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 061adf5fb3bc2e9358b6d3d5a6c93c59/B in TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:12,721 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 061adf5fb3bc2e9358b6d3d5a6c93c59/A in TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:12,721 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/4580ce431b3e4161afb5ce22d597c36a, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/3eb7959029924a489cd223d328501858, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/712e2d2aca9f48a8a2abb42fb1c6146d, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/5ea83f0029e74087be66dc506fab9431] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp, totalSize=47.9 K 2024-11-26T10:35:12,721 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/83baf926b1554d3ea02c81ea1702883c, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/e2b8b62ffc79457e9c83dc38ea9017cb, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/d19df45a85154e848cbd06025cc98967, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/af40567e515c4b6196ea03422719eff1] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp, totalSize=130.3 K 2024-11-26T10:35:12,721 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:12,721 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 
files: [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/83baf926b1554d3ea02c81ea1702883c, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/e2b8b62ffc79457e9c83dc38ea9017cb, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/d19df45a85154e848cbd06025cc98967, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/af40567e515c4b6196ea03422719eff1] 2024-11-26T10:35:12,722 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 4580ce431b3e4161afb5ce22d597c36a, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1732617304834 2024-11-26T10:35:12,722 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 83baf926b1554d3ea02c81ea1702883c, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1732617304834 2024-11-26T10:35:12,722 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 3eb7959029924a489cd223d328501858, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1732617305974 2024-11-26T10:35:12,722 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting e2b8b62ffc79457e9c83dc38ea9017cb, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1732617305974 2024-11-26T10:35:12,722 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 712e2d2aca9f48a8a2abb42fb1c6146d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732617308150 2024-11-26T10:35:12,722 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting d19df45a85154e848cbd06025cc98967, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732617308150 2024-11-26T10:35:12,722 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 5ea83f0029e74087be66dc506fab9431, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732617309300 2024-11-26T10:35:12,722 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting af40567e515c4b6196ea03422719eff1, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732617309300 2024-11-26T10:35:12,727 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=061adf5fb3bc2e9358b6d3d5a6c93c59] 2024-11-26T10:35:12,728 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 061adf5fb3bc2e9358b6d3d5a6c93c59#B#compaction#338 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:35:12,729 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/d09ff4d3a70a433da77d0605c0d996d1 is 50, key is test_row_0/B:col10/1732617309313/Put/seqid=0 2024-11-26T10:35:12,734 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411263fa32e5d96504260b641714638192a19_061adf5fb3bc2e9358b6d3d5a6c93c59 store=[table=TestAcidGuarantees family=A region=061adf5fb3bc2e9358b6d3d5a6c93c59] 2024-11-26T10:35:12,737 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411263fa32e5d96504260b641714638192a19_061adf5fb3bc2e9358b6d3d5a6c93c59, store=[table=TestAcidGuarantees family=A region=061adf5fb3bc2e9358b6d3d5a6c93c59] 2024-11-26T10:35:12,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742225_1401 (size=12289) 2024-11-26T10:35:12,737 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411263fa32e5d96504260b641714638192a19_061adf5fb3bc2e9358b6d3d5a6c93c59 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=061adf5fb3bc2e9358b6d3d5a6c93c59] 2024-11-26T10:35:12,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742226_1402 (size=4469) 2024-11-26T10:35:12,845 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:12,845 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-26T10:35:12,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 
2024-11-26T10:35:12,846 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2837): Flushing 061adf5fb3bc2e9358b6d3d5a6c93c59 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-26T10:35:12,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 061adf5fb3bc2e9358b6d3d5a6c93c59, store=A 2024-11-26T10:35:12,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:12,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 061adf5fb3bc2e9358b6d3d5a6c93c59, store=B 2024-11-26T10:35:12,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:12,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 061adf5fb3bc2e9358b6d3d5a6c93c59, store=C 2024-11-26T10:35:12,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:12,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126bca765f8b1db44398eeaded8f0ef965b_061adf5fb3bc2e9358b6d3d5a6c93c59 is 50, key is test_row_0/A:col10/1732617311101/Put/seqid=0 2024-11-26T10:35:12,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742227_1403 (size=12304) 2024-11-26T10:35:13,141 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 061adf5fb3bc2e9358b6d3d5a6c93c59#A#compaction#339 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:35:13,142 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/d3e60ad68b584df6ad5c1570122374b8 is 175, key is test_row_0/A:col10/1732617309313/Put/seqid=0 2024-11-26T10:35:13,142 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/d09ff4d3a70a433da77d0605c0d996d1 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/d09ff4d3a70a433da77d0605c0d996d1 2024-11-26T10:35:13,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742228_1404 (size=31243) 2024-11-26T10:35:13,147 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 061adf5fb3bc2e9358b6d3d5a6c93c59/B of 061adf5fb3bc2e9358b6d3d5a6c93c59 into d09ff4d3a70a433da77d0605c0d996d1(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:35:13,147 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 061adf5fb3bc2e9358b6d3d5a6c93c59: 2024-11-26T10:35:13,147 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59., storeName=061adf5fb3bc2e9358b6d3d5a6c93c59/B, priority=12, startTime=1732617312720; duration=0sec 2024-11-26T10:35:13,147 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:35:13,147 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 061adf5fb3bc2e9358b6d3d5a6c93c59:B 2024-11-26T10:35:13,147 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-26T10:35:13,148 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-26T10:35:13,148 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): 061adf5fb3bc2e9358b6d3d5a6c93c59/C is initiating minor compaction (all files) 2024-11-26T10:35:13,148 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 061adf5fb3bc2e9358b6d3d5a6c93c59/C in TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 
2024-11-26T10:35:13,148 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/0094adf87cdb431ea2045cff6294bf8c, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/0abde2d6b5144f2a83a6545f849f8341, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/daa46a9997404c09af9fc895e6906e16, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/19f5f034a6d947e791257e2e46dc9818] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp, totalSize=47.9 K 2024-11-26T10:35:13,148 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 0094adf87cdb431ea2045cff6294bf8c, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1732617304834 2024-11-26T10:35:13,148 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 0abde2d6b5144f2a83a6545f849f8341, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1732617305974 2024-11-26T10:35:13,149 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting daa46a9997404c09af9fc895e6906e16, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732617308150 2024-11-26T10:35:13,149 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 19f5f034a6d947e791257e2e46dc9818, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732617309300 2024-11-26T10:35:13,157 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 061adf5fb3bc2e9358b6d3d5a6c93c59#C#compaction#341 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:35:13,158 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/ca0ebff2eaea4124afc4f39133d765ec is 50, key is test_row_0/C:col10/1732617309313/Put/seqid=0 2024-11-26T10:35:13,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742229_1405 (size=12289) 2024-11-26T10:35:13,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:35:13,254 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 
as already flushing 2024-11-26T10:35:13,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:13,257 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126bca765f8b1db44398eeaded8f0ef965b_061adf5fb3bc2e9358b6d3d5a6c93c59 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126bca765f8b1db44398eeaded8f0ef965b_061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:35:13,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/762aabfe03c34793925530eb434da56b, store: [table=TestAcidGuarantees family=A region=061adf5fb3bc2e9358b6d3d5a6c93c59] 2024-11-26T10:35:13,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/762aabfe03c34793925530eb434da56b is 175, key is test_row_0/A:col10/1732617311101/Put/seqid=0 2024-11-26T10:35:13,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742230_1406 (size=31105) 2024-11-26T10:35:13,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-26T10:35:13,342 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:13,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51346 deadline: 1732617373337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:13,342 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:13,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51326 deadline: 1732617373337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:13,447 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:13,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51346 deadline: 1732617373443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:13,448 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:13,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51326 deadline: 1732617373443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:13,459 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:13,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51312 deadline: 1732617373455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:13,459 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:13,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51382 deadline: 1732617373455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:13,460 DEBUG [Thread-1537 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4148 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59., hostname=ccf62758a0a5,45419,1732617185877, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-26T10:35:13,460 DEBUG [Thread-1535 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4146 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59., hostname=ccf62758a0a5,45419,1732617185877, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 
K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-26T10:35:13,460 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:13,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51374 deadline: 1732617373458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:13,461 DEBUG [Thread-1529 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4148 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59., hostname=ccf62758a0a5,45419,1732617185877, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-26T10:35:13,549 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/d3e60ad68b584df6ad5c1570122374b8 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/d3e60ad68b584df6ad5c1570122374b8 2024-11-26T10:35:13,552 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 
061adf5fb3bc2e9358b6d3d5a6c93c59/A of 061adf5fb3bc2e9358b6d3d5a6c93c59 into d3e60ad68b584df6ad5c1570122374b8(size=30.5 K), total size for store is 30.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:35:13,552 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 061adf5fb3bc2e9358b6d3d5a6c93c59: 2024-11-26T10:35:13,552 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59., storeName=061adf5fb3bc2e9358b6d3d5a6c93c59/A, priority=12, startTime=1732617312720; duration=0sec 2024-11-26T10:35:13,552 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:35:13,552 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 061adf5fb3bc2e9358b6d3d5a6c93c59:A 2024-11-26T10:35:13,564 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/ca0ebff2eaea4124afc4f39133d765ec as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/ca0ebff2eaea4124afc4f39133d765ec 2024-11-26T10:35:13,568 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 061adf5fb3bc2e9358b6d3d5a6c93c59/C of 061adf5fb3bc2e9358b6d3d5a6c93c59 into ca0ebff2eaea4124afc4f39133d765ec(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:35:13,568 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 061adf5fb3bc2e9358b6d3d5a6c93c59: 2024-11-26T10:35:13,568 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59., storeName=061adf5fb3bc2e9358b6d3d5a6c93c59/C, priority=12, startTime=1732617312720; duration=0sec 2024-11-26T10:35:13,568 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:35:13,568 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 061adf5fb3bc2e9358b6d3d5a6c93c59:C 2024-11-26T10:35:13,650 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:13,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51346 deadline: 1732617373648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:13,653 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:13,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51326 deadline: 1732617373650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:13,661 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=250, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/762aabfe03c34793925530eb434da56b 2024-11-26T10:35:13,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/06db2b67dd95447b8a5f65cdec026cfa is 50, key is test_row_0/B:col10/1732617311101/Put/seqid=0 2024-11-26T10:35:13,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742231_1407 (size=12151) 2024-11-26T10:35:13,954 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:13,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51346 deadline: 1732617373952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:13,959 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:13,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51326 deadline: 1732617373956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:14,071 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/06db2b67dd95447b8a5f65cdec026cfa 2024-11-26T10:35:14,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/aaf6dd0bc9e043aa96617c34b5f11639 is 50, key is test_row_0/C:col10/1732617311101/Put/seqid=0 2024-11-26T10:35:14,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742232_1408 (size=12151) 2024-11-26T10:35:14,459 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:14,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51346 deadline: 1732617374456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:14,467 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:14,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51326 deadline: 1732617374464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:14,480 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/aaf6dd0bc9e043aa96617c34b5f11639 2024-11-26T10:35:14,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/762aabfe03c34793925530eb434da56b as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/762aabfe03c34793925530eb434da56b 2024-11-26T10:35:14,486 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/762aabfe03c34793925530eb434da56b, entries=150, sequenceid=250, filesize=30.4 K 2024-11-26T10:35:14,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/06db2b67dd95447b8a5f65cdec026cfa as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/06db2b67dd95447b8a5f65cdec026cfa 2024-11-26T10:35:14,490 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/06db2b67dd95447b8a5f65cdec026cfa, entries=150, sequenceid=250, filesize=11.9 K 2024-11-26T10:35:14,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/aaf6dd0bc9e043aa96617c34b5f11639 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/aaf6dd0bc9e043aa96617c34b5f11639 2024-11-26T10:35:14,492 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/aaf6dd0bc9e043aa96617c34b5f11639, entries=150, sequenceid=250, filesize=11.9 K 2024-11-26T10:35:14,493 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 061adf5fb3bc2e9358b6d3d5a6c93c59 in 1648ms, sequenceid=250, compaction requested=false 2024-11-26T10:35:14,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2538): Flush status journal for 061adf5fb3bc2e9358b6d3d5a6c93c59: 2024-11-26T10:35:14,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:14,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=117 2024-11-26T10:35:14,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4106): Remote procedure done, pid=117 2024-11-26T10:35:14,495 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=117, resume processing ppid=116 2024-11-26T10:35:14,495 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.3230 sec 2024-11-26T10:35:14,496 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees in 3.3260 sec 2024-11-26T10:35:14,533 DEBUG [Thread-1542 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1f49665c to 127.0.0.1:61934 2024-11-26T10:35:14,533 DEBUG [Thread-1542 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:35:14,534 DEBUG [Thread-1546 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x75e4d3d0 to 127.0.0.1:61934 2024-11-26T10:35:14,534 DEBUG [Thread-1546 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:35:14,537 DEBUG [Thread-1548 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2b308f62 to 127.0.0.1:61934 2024-11-26T10:35:14,537 DEBUG [Thread-1548 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:35:14,537 DEBUG [Thread-1540 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5aee939b to 127.0.0.1:61934 2024-11-26T10:35:14,537 DEBUG [Thread-1540 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:35:14,538 DEBUG [Thread-1544 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x683f8469 to 
127.0.0.1:61934 2024-11-26T10:35:14,538 DEBUG [Thread-1544 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:35:15,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-26T10:35:15,275 INFO [Thread-1539 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 116 completed 2024-11-26T10:35:15,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:35:15,465 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 061adf5fb3bc2e9358b6d3d5a6c93c59 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-26T10:35:15,465 DEBUG [Thread-1533 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7ec15031 to 127.0.0.1:61934 2024-11-26T10:35:15,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 061adf5fb3bc2e9358b6d3d5a6c93c59, store=A 2024-11-26T10:35:15,465 DEBUG [Thread-1533 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:35:15,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:15,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 061adf5fb3bc2e9358b6d3d5a6c93c59, store=B 2024-11-26T10:35:15,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:15,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 061adf5fb3bc2e9358b6d3d5a6c93c59, store=C 2024-11-26T10:35:15,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:15,472 DEBUG [Thread-1531 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7b6cf8cb to 127.0.0.1:61934 2024-11-26T10:35:15,472 DEBUG [Thread-1531 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:35:15,474 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126f559849d692e4987b24a08cb4311ef6d_061adf5fb3bc2e9358b6d3d5a6c93c59 is 50, key is test_row_0/A:col10/1732617313329/Put/seqid=0 2024-11-26T10:35:15,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742233_1409 (size=12454) 2024-11-26T10:35:15,879 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:15,888 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126f559849d692e4987b24a08cb4311ef6d_061adf5fb3bc2e9358b6d3d5a6c93c59 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126f559849d692e4987b24a08cb4311ef6d_061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:35:15,889 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/fef75e90759940508d2e74ef3c1f170d, store: [table=TestAcidGuarantees family=A region=061adf5fb3bc2e9358b6d3d5a6c93c59] 2024-11-26T10:35:15,890 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/fef75e90759940508d2e74ef3c1f170d is 175, key is test_row_0/A:col10/1732617313329/Put/seqid=0 2024-11-26T10:35:15,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742234_1410 (size=31255) 2024-11-26T10:35:16,295 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=277, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/fef75e90759940508d2e74ef3c1f170d 2024-11-26T10:35:16,302 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/3f4735e0db764b888eafcea562f9f027 is 50, key is test_row_0/B:col10/1732617313329/Put/seqid=0 2024-11-26T10:35:16,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742235_1411 (size=12301) 2024-11-26T10:35:16,708 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=277 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/3f4735e0db764b888eafcea562f9f027 2024-11-26T10:35:16,724 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/616c21d7b1cb430191981124d7d18026 is 50, key is test_row_0/C:col10/1732617313329/Put/seqid=0 2024-11-26T10:35:16,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742236_1412 (size=12301) 2024-11-26T10:35:17,129 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=277 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/616c21d7b1cb430191981124d7d18026 2024-11-26T10:35:17,140 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/fef75e90759940508d2e74ef3c1f170d as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/fef75e90759940508d2e74ef3c1f170d 2024-11-26T10:35:17,146 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/fef75e90759940508d2e74ef3c1f170d, entries=150, sequenceid=277, filesize=30.5 K 2024-11-26T10:35:17,147 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/3f4735e0db764b888eafcea562f9f027 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/3f4735e0db764b888eafcea562f9f027 2024-11-26T10:35:17,150 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/3f4735e0db764b888eafcea562f9f027, entries=150, sequenceid=277, filesize=12.0 K 2024-11-26T10:35:17,150 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/616c21d7b1cb430191981124d7d18026 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/616c21d7b1cb430191981124d7d18026 2024-11-26T10:35:17,154 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/616c21d7b1cb430191981124d7d18026, entries=150, sequenceid=277, filesize=12.0 K 2024-11-26T10:35:17,154 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=6.71 KB/6870 for 061adf5fb3bc2e9358b6d3d5a6c93c59 in 1689ms, sequenceid=277, compaction requested=true 2024-11-26T10:35:17,154 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 061adf5fb3bc2e9358b6d3d5a6c93c59: 2024-11-26T10:35:17,154 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 061adf5fb3bc2e9358b6d3d5a6c93c59:A, priority=-2147483648, current under compaction store size is 1 2024-11-26T10:35:17,154 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:35:17,154 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:35:17,154 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 061adf5fb3bc2e9358b6d3d5a6c93c59:B, priority=-2147483648, current under compaction store size is 2 2024-11-26T10:35:17,155 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:35:17,155 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 
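
Note: the repeated RegionTooBusyException warnings earlier in this log come from HRegion.checkResources rejecting writes once the region's memstore passes its blocking limit (this test run uses a deliberately tiny limit of 512.0 K), after which the client's RpcRetryingCallerImpl backs off and retries, which is the "tries=6, retries=16" entry above. Below is a minimal client-side sketch of the same write path, assuming a reachable cluster and reusing the table, row, family and qualifier names seen in this log; the class name and the retry/pause values are illustrative, not part of the test tool.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // Client-side retry policy: the log shows the default caller retrying
    // ("tries=6, retries=16") while the region's memstore is over its blocking
    // limit. These values are illustrative.
    conf.setInt("hbase.client.retries.number", 16);
    conf.setLong("hbase.client.pause", 100L);
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_2"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
      // put() blocks while the retrying caller backs off on RegionTooBusyException;
      // if all retries are exhausted, the failure surfaces here as an IOException.
      table.put(put);
    }
  }
}
```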
2024-11-26T10:35:17,155 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 061adf5fb3bc2e9358b6d3d5a6c93c59:C, priority=-2147483648, current under compaction store size is 3 2024-11-26T10:35:17,155 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:35:17,155 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93603 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:35:17,155 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36741 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:35:17,155 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): 061adf5fb3bc2e9358b6d3d5a6c93c59/A is initiating minor compaction (all files) 2024-11-26T10:35:17,155 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): 061adf5fb3bc2e9358b6d3d5a6c93c59/B is initiating minor compaction (all files) 2024-11-26T10:35:17,155 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 061adf5fb3bc2e9358b6d3d5a6c93c59/A in TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:17,155 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 061adf5fb3bc2e9358b6d3d5a6c93c59/B in TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 
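
Note: the "Add compact mark" / ExploringCompactionPolicy entries above show the region server automatically selecting three store files per family for a minor compaction after the flush. Compactions can also be requested explicitly through the Admin API; a minimal sketch follows, assuming a running cluster, with the class name chosen purely for illustration.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class RequestCompaction {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      // Ask for a compaction of a single column family; the request is queued
      // asynchronously on the region server, much like the
      // "Small Compaction requested: system" entries above.
      admin.compact(table, Bytes.toBytes("A"));
      // A major compaction of the whole table rewrites every store file.
      admin.majorCompact(table);
    }
  }
}
```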
2024-11-26T10:35:17,155 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/d09ff4d3a70a433da77d0605c0d996d1, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/06db2b67dd95447b8a5f65cdec026cfa, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/3f4735e0db764b888eafcea562f9f027] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp, totalSize=35.9 K 2024-11-26T10:35:17,155 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/d3e60ad68b584df6ad5c1570122374b8, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/762aabfe03c34793925530eb434da56b, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/fef75e90759940508d2e74ef3c1f170d] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp, totalSize=91.4 K 2024-11-26T10:35:17,155 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:17,155 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 
files: [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/d3e60ad68b584df6ad5c1570122374b8, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/762aabfe03c34793925530eb434da56b, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/fef75e90759940508d2e74ef3c1f170d] 2024-11-26T10:35:17,156 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting d09ff4d3a70a433da77d0605c0d996d1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732617309300 2024-11-26T10:35:17,156 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting d3e60ad68b584df6ad5c1570122374b8, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732617309300 2024-11-26T10:35:17,156 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 06db2b67dd95447b8a5f65cdec026cfa, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732617311101 2024-11-26T10:35:17,156 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 762aabfe03c34793925530eb434da56b, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732617311101 2024-11-26T10:35:17,156 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3f4735e0db764b888eafcea562f9f027, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732617313299 2024-11-26T10:35:17,156 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting fef75e90759940508d2e74ef3c1f170d, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732617313299 2024-11-26T10:35:17,161 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=061adf5fb3bc2e9358b6d3d5a6c93c59] 2024-11-26T10:35:17,162 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 061adf5fb3bc2e9358b6d3d5a6c93c59#B#compaction#347 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:35:17,162 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112657bcccbeb78b4351a131e254f8db91b9_061adf5fb3bc2e9358b6d3d5a6c93c59 store=[table=TestAcidGuarantees family=A region=061adf5fb3bc2e9358b6d3d5a6c93c59] 2024-11-26T10:35:17,162 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/a04ee9ab9e4f4a9688dbf150dec9d68e is 50, key is test_row_0/B:col10/1732617313329/Put/seqid=0 2024-11-26T10:35:17,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742237_1413 (size=12541) 2024-11-26T10:35:17,166 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112657bcccbeb78b4351a131e254f8db91b9_061adf5fb3bc2e9358b6d3d5a6c93c59, store=[table=TestAcidGuarantees family=A region=061adf5fb3bc2e9358b6d3d5a6c93c59] 2024-11-26T10:35:17,166 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112657bcccbeb78b4351a131e254f8db91b9_061adf5fb3bc2e9358b6d3d5a6c93c59 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=061adf5fb3bc2e9358b6d3d5a6c93c59] 2024-11-26T10:35:17,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742238_1414 (size=4469) 2024-11-26T10:35:17,491 DEBUG [Thread-1537 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3c336ea4 to 127.0.0.1:61934 2024-11-26T10:35:17,491 DEBUG [Thread-1537 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:35:17,496 DEBUG [Thread-1535 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3dd5b441 to 127.0.0.1:61934 2024-11-26T10:35:17,496 DEBUG [Thread-1535 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:35:17,497 DEBUG [Thread-1529 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1b82ba2a to 127.0.0.1:61934 2024-11-26T10:35:17,497 DEBUG [Thread-1529 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:35:17,497 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers:
2024-11-26T10:35:17,497 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 40
2024-11-26T10:35:17,497 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 53
2024-11-26T10:35:17,498 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 40
2024-11-26T10:35:17,498 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 44
2024-11-26T10:35:17,498 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 43
2024-11-26T10:35:17,498 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers:
2024-11-26T10:35:17,498 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners:
2024-11-26T10:35:17,498 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2681
2024-11-26T10:35:17,498 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8043 rows
2024-11-26T10:35:17,498 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2679
2024-11-26T10:35:17,498 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8037 rows
2024-11-26T10:35:17,498 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2672
2024-11-26T10:35:17,498 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8016 rows
2024-11-26T10:35:17,498 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2676
2024-11-26T10:35:17,498 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8028 rows
2024-11-26T10:35:17,498 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2672
2024-11-26T10:35:17,498 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8015 rows
2024-11-26T10:35:17,498 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-11-26T10:35:17,498 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2b976e1a to 127.0.0.1:61934
2024-11-26T10:35:17,498 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-26T10:35:17,500 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees
2024-11-26T10:35:17,501 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees
2024-11-26T10:35:17,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=118, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees
2024-11-26T10:35:17,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118
2024-11-26T10:35:17,504 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732617317504"}]},"ts":"1732617317504"}
2024-11-26T10:35:17,505 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta
2024-11-26T10:35:17,551 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING
2024-11-26T10:35:17,553 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=118, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}]
2024-11-26T10:35:17,555 INFO [PEWorker-5 {}]
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=120, ppid=119, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=061adf5fb3bc2e9358b6d3d5a6c93c59, UNASSIGN}] 2024-11-26T10:35:17,556 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=120, ppid=119, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=061adf5fb3bc2e9358b6d3d5a6c93c59, UNASSIGN 2024-11-26T10:35:17,558 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=120 updating hbase:meta row=061adf5fb3bc2e9358b6d3d5a6c93c59, regionState=CLOSING, regionLocation=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:17,559 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-26T10:35:17,559 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE; CloseRegionProcedure 061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877}] 2024-11-26T10:35:17,571 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 061adf5fb3bc2e9358b6d3d5a6c93c59#A#compaction#348 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:35:17,572 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/b5d21c600e144863bb7b4678ac8f586c is 175, key is test_row_0/A:col10/1732617313329/Put/seqid=0 2024-11-26T10:35:17,573 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/a04ee9ab9e4f4a9688dbf150dec9d68e as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/a04ee9ab9e4f4a9688dbf150dec9d68e 2024-11-26T10:35:17,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742239_1415 (size=31495) 2024-11-26T10:35:17,579 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 061adf5fb3bc2e9358b6d3d5a6c93c59/B of 061adf5fb3bc2e9358b6d3d5a6c93c59 into a04ee9ab9e4f4a9688dbf150dec9d68e(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
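
Note: the writer counts in the summary above ("wrote 40", "wrote 53", ...) come from AcidGuaranteesTestTool threads that update families A, B and C of one row in a single mutation, which is what makes the scanners' row-level consistency check meaningful. A minimal sketch of such a write follows, assuming a reachable cluster; the value literal and class name are illustrative.

```java
import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class AtomicMultiFamilyWrite {
  public static void main(String[] args) throws IOException {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      byte[] value = Bytes.toBytes("value-42");
      Put put = new Put(Bytes.toBytes("test_row_0"));
      // All three families are updated in one Put, so a reader sees either the
      // old value in A, B and C or the new value in all three -- never a mix.
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), value);
      put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), value);
      put.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col10"), value);
      table.put(put);
    }
  }
}
```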
2024-11-26T10:35:17,579 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 061adf5fb3bc2e9358b6d3d5a6c93c59: 2024-11-26T10:35:17,579 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59., storeName=061adf5fb3bc2e9358b6d3d5a6c93c59/B, priority=13, startTime=1732617317154; duration=0sec 2024-11-26T10:35:17,579 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:35:17,579 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 061adf5fb3bc2e9358b6d3d5a6c93c59:B 2024-11-26T10:35:17,579 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:35:17,580 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36741 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:35:17,580 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): 061adf5fb3bc2e9358b6d3d5a6c93c59/C is initiating minor compaction (all files) 2024-11-26T10:35:17,580 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 061adf5fb3bc2e9358b6d3d5a6c93c59/C in TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:17,580 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/ca0ebff2eaea4124afc4f39133d765ec, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/aaf6dd0bc9e043aa96617c34b5f11639, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/616c21d7b1cb430191981124d7d18026] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp, totalSize=35.9 K 2024-11-26T10:35:17,581 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting ca0ebff2eaea4124afc4f39133d765ec, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732617309300 2024-11-26T10:35:17,581 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting aaf6dd0bc9e043aa96617c34b5f11639, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732617311101 2024-11-26T10:35:17,581 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 616c21d7b1cb430191981124d7d18026, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732617313299 2024-11-26T10:35:17,587 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 061adf5fb3bc2e9358b6d3d5a6c93c59#C#compaction#349 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:35:17,587 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/52ceed9c6cb3412e83ee0a6b2888b86b is 50, key is test_row_0/C:col10/1732617313329/Put/seqid=0 2024-11-26T10:35:17,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742240_1416 (size=12541) 2024-11-26T10:35:17,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-26T10:35:17,712 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:17,713 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] handler.UnassignRegionHandler(124): Close 061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:35:17,714 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-26T10:35:17,714 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegion(1681): Closing 061adf5fb3bc2e9358b6d3d5a6c93c59, disabling compactions & flushes 2024-11-26T10:35:17,714 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegion(1942): waiting for 2 compactions to complete for region TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:17,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-26T10:35:17,988 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/b5d21c600e144863bb7b4678ac8f586c as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/b5d21c600e144863bb7b4678ac8f586c 2024-11-26T10:35:17,995 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 061adf5fb3bc2e9358b6d3d5a6c93c59/A of 061adf5fb3bc2e9358b6d3d5a6c93c59 into b5d21c600e144863bb7b4678ac8f586c(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
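The three store compactions above (A, B, then C, each merging three files into a single HFile) were selected automatically by the region server's compaction policy. For reference only, a compaction can also be requested and monitored from the client side, as in this sketch (assuming an open Admin handle named admin; this is illustrative and not what the test itself does):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;

    // Illustrative only: request a compaction of the table and poll until the
    // region servers report that no compaction is in progress.
    TableName tn = TableName.valueOf("TestAcidGuarantees");
    admin.compact(tn);
    while (admin.getCompactionState(tn) != CompactionState.NONE) {
        Thread.sleep(100);
    }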
2024-11-26T10:35:17,995 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 061adf5fb3bc2e9358b6d3d5a6c93c59: 2024-11-26T10:35:17,995 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59., storeName=061adf5fb3bc2e9358b6d3d5a6c93c59/A, priority=13, startTime=1732617317154; duration=0sec 2024-11-26T10:35:17,995 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:35:17,995 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 061adf5fb3bc2e9358b6d3d5a6c93c59:A 2024-11-26T10:35:17,995 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/52ceed9c6cb3412e83ee0a6b2888b86b as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/52ceed9c6cb3412e83ee0a6b2888b86b 2024-11-26T10:35:18,000 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 061adf5fb3bc2e9358b6d3d5a6c93c59/C of 061adf5fb3bc2e9358b6d3d5a6c93c59 into 52ceed9c6cb3412e83ee0a6b2888b86b(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:35:18,000 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 061adf5fb3bc2e9358b6d3d5a6c93c59: 2024-11-26T10:35:18,000 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59., storeName=061adf5fb3bc2e9358b6d3d5a6c93c59/C, priority=13, startTime=1732617317155; duration=0sec 2024-11-26T10:35:18,000 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:18,000 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:35:18,000 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:18,000 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 061adf5fb3bc2e9358b6d3d5a6c93c59:C 2024-11-26T10:35:18,000 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 
after waiting 0 ms 2024-11-26T10:35:18,000 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:18,001 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegion(2837): Flushing 061adf5fb3bc2e9358b6d3d5a6c93c59 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-26T10:35:18,001 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 061adf5fb3bc2e9358b6d3d5a6c93c59, store=A 2024-11-26T10:35:18,001 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:18,001 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 061adf5fb3bc2e9358b6d3d5a6c93c59, store=B 2024-11-26T10:35:18,001 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:18,001 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 061adf5fb3bc2e9358b6d3d5a6c93c59, store=C 2024-11-26T10:35:18,001 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:18,008 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411264290376978244941ba975afe0e25d21d_061adf5fb3bc2e9358b6d3d5a6c93c59 is 50, key is test_row_0/A:col10/1732617317488/Put/seqid=0 2024-11-26T10:35:18,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742241_1417 (size=12454) 2024-11-26T10:35:18,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-26T10:35:18,413 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:18,422 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411264290376978244941ba975afe0e25d21d_061adf5fb3bc2e9358b6d3d5a6c93c59 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411264290376978244941ba975afe0e25d21d_061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:35:18,424 DEBUG 
[RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/77aede56863c45dea0f404c39ae74846, store: [table=TestAcidGuarantees family=A region=061adf5fb3bc2e9358b6d3d5a6c93c59] 2024-11-26T10:35:18,425 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/77aede56863c45dea0f404c39ae74846 is 175, key is test_row_0/A:col10/1732617317488/Put/seqid=0 2024-11-26T10:35:18,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742242_1418 (size=31255) 2024-11-26T10:35:18,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-26T10:35:18,830 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=287, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/77aede56863c45dea0f404c39ae74846 2024-11-26T10:35:18,845 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/8547ffc8ae674fc3a05a5e0e3d8a1d98 is 50, key is test_row_0/B:col10/1732617317488/Put/seqid=0 2024-11-26T10:35:18,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742243_1419 (size=12301) 2024-11-26T10:35:19,250 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/8547ffc8ae674fc3a05a5e0e3d8a1d98 2024-11-26T10:35:19,263 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/a1fb85bdfd06499cb525265c5f694628 is 50, key is test_row_0/C:col10/1732617317488/Put/seqid=0 2024-11-26T10:35:19,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742244_1420 (size=12301) 2024-11-26T10:35:19,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-26T10:35:19,669 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed 
memstore data size=8.95 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/a1fb85bdfd06499cb525265c5f694628 2024-11-26T10:35:19,680 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/A/77aede56863c45dea0f404c39ae74846 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/77aede56863c45dea0f404c39ae74846 2024-11-26T10:35:19,685 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/77aede56863c45dea0f404c39ae74846, entries=150, sequenceid=287, filesize=30.5 K 2024-11-26T10:35:19,686 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/B/8547ffc8ae674fc3a05a5e0e3d8a1d98 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/8547ffc8ae674fc3a05a5e0e3d8a1d98 2024-11-26T10:35:19,691 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/8547ffc8ae674fc3a05a5e0e3d8a1d98, entries=150, sequenceid=287, filesize=12.0 K 2024-11-26T10:35:19,692 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/.tmp/C/a1fb85bdfd06499cb525265c5f694628 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/a1fb85bdfd06499cb525265c5f694628 2024-11-26T10:35:19,697 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/a1fb85bdfd06499cb525265c5f694628, entries=150, sequenceid=287, filesize=12.0 K 2024-11-26T10:35:19,698 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 061adf5fb3bc2e9358b6d3d5a6c93c59 in 1698ms, sequenceid=287, compaction requested=false 2024-11-26T10:35:19,699 DEBUG [StoreCloser-TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/83baf926b1554d3ea02c81ea1702883c, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/e2b8b62ffc79457e9c83dc38ea9017cb, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/d19df45a85154e848cbd06025cc98967, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/af40567e515c4b6196ea03422719eff1, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/d3e60ad68b584df6ad5c1570122374b8, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/762aabfe03c34793925530eb434da56b, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/fef75e90759940508d2e74ef3c1f170d] to archive 2024-11-26T10:35:19,699 DEBUG [StoreCloser-TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-26T10:35:19,701 DEBUG [StoreCloser-TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/83baf926b1554d3ea02c81ea1702883c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/83baf926b1554d3ea02c81ea1702883c 2024-11-26T10:35:19,703 DEBUG [StoreCloser-TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/e2b8b62ffc79457e9c83dc38ea9017cb to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/e2b8b62ffc79457e9c83dc38ea9017cb 2024-11-26T10:35:19,705 DEBUG [StoreCloser-TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/d19df45a85154e848cbd06025cc98967 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/d19df45a85154e848cbd06025cc98967 2024-11-26T10:35:19,706 DEBUG [StoreCloser-TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/af40567e515c4b6196ea03422719eff1 to 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/af40567e515c4b6196ea03422719eff1 2024-11-26T10:35:19,708 DEBUG [StoreCloser-TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/d3e60ad68b584df6ad5c1570122374b8 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/d3e60ad68b584df6ad5c1570122374b8 2024-11-26T10:35:19,710 DEBUG [StoreCloser-TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/762aabfe03c34793925530eb434da56b to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/762aabfe03c34793925530eb434da56b 2024-11-26T10:35:19,711 DEBUG [StoreCloser-TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/fef75e90759940508d2e74ef3c1f170d to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/fef75e90759940508d2e74ef3c1f170d 2024-11-26T10:35:19,713 DEBUG [StoreCloser-TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/4580ce431b3e4161afb5ce22d597c36a, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/3eb7959029924a489cd223d328501858, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/712e2d2aca9f48a8a2abb42fb1c6146d, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/d09ff4d3a70a433da77d0605c0d996d1, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/5ea83f0029e74087be66dc506fab9431, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/06db2b67dd95447b8a5f65cdec026cfa, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/3f4735e0db764b888eafcea562f9f027] to archive 2024-11-26T10:35:19,714 DEBUG [StoreCloser-TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-26T10:35:19,716 DEBUG [StoreCloser-TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/4580ce431b3e4161afb5ce22d597c36a to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/4580ce431b3e4161afb5ce22d597c36a 2024-11-26T10:35:19,718 DEBUG [StoreCloser-TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/3eb7959029924a489cd223d328501858 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/3eb7959029924a489cd223d328501858 2024-11-26T10:35:19,720 DEBUG [StoreCloser-TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/712e2d2aca9f48a8a2abb42fb1c6146d to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/712e2d2aca9f48a8a2abb42fb1c6146d 2024-11-26T10:35:19,722 DEBUG [StoreCloser-TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/d09ff4d3a70a433da77d0605c0d996d1 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/d09ff4d3a70a433da77d0605c0d996d1 2024-11-26T10:35:19,723 DEBUG [StoreCloser-TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/5ea83f0029e74087be66dc506fab9431 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/5ea83f0029e74087be66dc506fab9431 2024-11-26T10:35:19,725 DEBUG [StoreCloser-TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/06db2b67dd95447b8a5f65cdec026cfa to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/06db2b67dd95447b8a5f65cdec026cfa 2024-11-26T10:35:19,726 DEBUG [StoreCloser-TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/3f4735e0db764b888eafcea562f9f027 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/3f4735e0db764b888eafcea562f9f027 2024-11-26T10:35:19,727 DEBUG [StoreCloser-TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/0094adf87cdb431ea2045cff6294bf8c, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/0abde2d6b5144f2a83a6545f849f8341, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/daa46a9997404c09af9fc895e6906e16, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/ca0ebff2eaea4124afc4f39133d765ec, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/19f5f034a6d947e791257e2e46dc9818, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/aaf6dd0bc9e043aa96617c34b5f11639, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/616c21d7b1cb430191981124d7d18026] to archive 2024-11-26T10:35:19,727 DEBUG [StoreCloser-TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
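The HFileArchiver entries above show that compacted-away store files are moved under the cluster's archive directory rather than deleted outright. If one wanted to inspect that archive directly, a sketch using the Hadoop FileSystem API could look like the following (the namenode address and archive path are copied from the log; everything else is assumed):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Assumed inspection snippet, not part of the test: list what HFileArchiver
    // moved under the archive directory for this table.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:44321"), new Configuration());
    Path archived = new Path("/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/"
        + "archive/data/default/TestAcidGuarantees");
    for (FileStatus status : fs.listStatus(archived)) {
        System.out.println(status.getPath() + "\t" + status.getLen());
    }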
2024-11-26T10:35:19,729 DEBUG [StoreCloser-TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/0094adf87cdb431ea2045cff6294bf8c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/0094adf87cdb431ea2045cff6294bf8c 2024-11-26T10:35:19,730 DEBUG [StoreCloser-TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/0abde2d6b5144f2a83a6545f849f8341 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/0abde2d6b5144f2a83a6545f849f8341 2024-11-26T10:35:19,732 DEBUG [StoreCloser-TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/daa46a9997404c09af9fc895e6906e16 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/daa46a9997404c09af9fc895e6906e16 2024-11-26T10:35:19,733 DEBUG [StoreCloser-TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/ca0ebff2eaea4124afc4f39133d765ec to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/ca0ebff2eaea4124afc4f39133d765ec 2024-11-26T10:35:19,734 DEBUG [StoreCloser-TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/19f5f034a6d947e791257e2e46dc9818 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/19f5f034a6d947e791257e2e46dc9818 2024-11-26T10:35:19,735 DEBUG [StoreCloser-TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/aaf6dd0bc9e043aa96617c34b5f11639 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/aaf6dd0bc9e043aa96617c34b5f11639 2024-11-26T10:35:19,737 DEBUG [StoreCloser-TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/616c21d7b1cb430191981124d7d18026 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/616c21d7b1cb430191981124d7d18026 2024-11-26T10:35:19,741 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/recovered.edits/290.seqid, newMaxSeqId=290, maxSeqId=4 2024-11-26T10:35:19,742 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59. 2024-11-26T10:35:19,742 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegion(1635): Region close journal for 061adf5fb3bc2e9358b6d3d5a6c93c59: 2024-11-26T10:35:19,744 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] handler.UnassignRegionHandler(170): Closed 061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:35:19,744 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=120 updating hbase:meta row=061adf5fb3bc2e9358b6d3d5a6c93c59, regionState=CLOSED 2024-11-26T10:35:19,747 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=121, resume processing ppid=120 2024-11-26T10:35:19,747 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, ppid=120, state=SUCCESS; CloseRegionProcedure 061adf5fb3bc2e9358b6d3d5a6c93c59, server=ccf62758a0a5,45419,1732617185877 in 2.1860 sec 2024-11-26T10:35:19,750 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=120, resume processing ppid=119 2024-11-26T10:35:19,750 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, ppid=119, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=061adf5fb3bc2e9358b6d3d5a6c93c59, UNASSIGN in 2.1920 sec 2024-11-26T10:35:19,753 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=118 2024-11-26T10:35:19,753 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=118, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 2.1980 sec 2024-11-26T10:35:19,755 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732617319754"}]},"ts":"1732617319754"} 2024-11-26T10:35:19,756 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-26T10:35:19,784 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-26T10:35:19,788 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 2.2840 sec 2024-11-26T10:35:21,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-26T10:35:21,615 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, 
Table Name: default:TestAcidGuarantees, procId: 118 completed 2024-11-26T10:35:21,617 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-26T10:35:21,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=122, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-26T10:35:21,620 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=122, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-26T10:35:21,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-26T10:35:21,621 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=122, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-26T10:35:21,625 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:35:21,630 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A, FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B, FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C, FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/recovered.edits] 2024-11-26T10:35:21,635 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/77aede56863c45dea0f404c39ae74846 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/77aede56863c45dea0f404c39ae74846 2024-11-26T10:35:21,636 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/b5d21c600e144863bb7b4678ac8f586c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/A/b5d21c600e144863bb7b4678ac8f586c 2024-11-26T10:35:21,640 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/8547ffc8ae674fc3a05a5e0e3d8a1d98 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/8547ffc8ae674fc3a05a5e0e3d8a1d98 2024-11-26T10:35:21,641 
DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/a04ee9ab9e4f4a9688dbf150dec9d68e to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/B/a04ee9ab9e4f4a9688dbf150dec9d68e 2024-11-26T10:35:21,644 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/52ceed9c6cb3412e83ee0a6b2888b86b to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/52ceed9c6cb3412e83ee0a6b2888b86b 2024-11-26T10:35:21,645 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/a1fb85bdfd06499cb525265c5f694628 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/C/a1fb85bdfd06499cb525265c5f694628 2024-11-26T10:35:21,647 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/recovered.edits/290.seqid to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59/recovered.edits/290.seqid 2024-11-26T10:35:21,647 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:35:21,648 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-26T10:35:21,648 DEBUG [PEWorker-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-26T10:35:21,648 DEBUG [PEWorker-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-26T10:35:21,650 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411261074ba1f6eb24500af1e6ccdb5b0b40c_061adf5fb3bc2e9358b6d3d5a6c93c59 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411261074ba1f6eb24500af1e6ccdb5b0b40c_061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:35:21,651 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411264290376978244941ba975afe0e25d21d_061adf5fb3bc2e9358b6d3d5a6c93c59 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411264290376978244941ba975afe0e25d21d_061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:35:21,652 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112664cb4baa430b4c1a9fc4c60ad4fae23d_061adf5fb3bc2e9358b6d3d5a6c93c59 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112664cb4baa430b4c1a9fc4c60ad4fae23d_061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:35:21,653 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126793d905c86bb4fa084489a3b1c6cf3d6_061adf5fb3bc2e9358b6d3d5a6c93c59 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126793d905c86bb4fa084489a3b1c6cf3d6_061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:35:21,654 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411268dea21e3daec4d82836cfc8cfefc11aa_061adf5fb3bc2e9358b6d3d5a6c93c59 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411268dea21e3daec4d82836cfc8cfefc11aa_061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:35:21,655 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112698a815f535b3409ca1a136207b2ca8d9_061adf5fb3bc2e9358b6d3d5a6c93c59 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112698a815f535b3409ca1a136207b2ca8d9_061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:35:21,656 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126ba8401bfc265401cb755faca097cf437_061adf5fb3bc2e9358b6d3d5a6c93c59 to 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126ba8401bfc265401cb755faca097cf437_061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:35:21,657 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126bca765f8b1db44398eeaded8f0ef965b_061adf5fb3bc2e9358b6d3d5a6c93c59 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126bca765f8b1db44398eeaded8f0ef965b_061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:35:21,659 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126caa84bfe939d40adae4f5c303f19a517_061adf5fb3bc2e9358b6d3d5a6c93c59 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126caa84bfe939d40adae4f5c303f19a517_061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:35:21,660 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126d3e109854915495f821261b3ebf7bd9e_061adf5fb3bc2e9358b6d3d5a6c93c59 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126d3e109854915495f821261b3ebf7bd9e_061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:35:21,661 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126e9ceda037c9840889048905c32bd9307_061adf5fb3bc2e9358b6d3d5a6c93c59 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126e9ceda037c9840889048905c32bd9307_061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:35:21,662 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126ea9fa7b740434dbb8db73eda870a8b26_061adf5fb3bc2e9358b6d3d5a6c93c59 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126ea9fa7b740434dbb8db73eda870a8b26_061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:35:21,663 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126f559849d692e4987b24a08cb4311ef6d_061adf5fb3bc2e9358b6d3d5a6c93c59 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126f559849d692e4987b24a08cb4311ef6d_061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:35:21,665 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126f9b8648ea576459bb86169e496e88ba5_061adf5fb3bc2e9358b6d3d5a6c93c59 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126f9b8648ea576459bb86169e496e88ba5_061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:35:21,666 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126f9ba89bf2d244308995baa6562d321f5_061adf5fb3bc2e9358b6d3d5a6c93c59 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126f9ba89bf2d244308995baa6562d321f5_061adf5fb3bc2e9358b6d3d5a6c93c59 2024-11-26T10:35:21,667 DEBUG [PEWorker-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-26T10:35:21,669 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=122, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-26T10:35:21,670 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-26T10:35:21,672 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-26T10:35:21,673 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=122, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-26T10:35:21,673 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 
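With the table disabled, the client issued a DELETE (stored as pid=122 above); the DeleteTableProcedure archives the region and MOB directories and then cleans up hbase:meta, as the entries that follow show. A hedged client-side sketch of that step, reusing the assumed conn from the earlier disable snippet, would be:

    // Assumed follow-on to the earlier disable sketch; mirrors the DELETE
    // operation recorded as pid=122 in the log.
    TableName tn = TableName.valueOf("TestAcidGuarantees");
    try (Admin admin = conn.getAdmin()) {
        if (admin.isTableDisabled(tn)) {
            admin.deleteTable(tn);            // blocks until DeleteTableProcedure finishes
        }
        assert !admin.tableExists(tn);
    }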
2024-11-26T10:35:21,674 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732617321673"}]},"ts":"9223372036854775807"} 2024-11-26T10:35:21,676 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-26T10:35:21,676 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 061adf5fb3bc2e9358b6d3d5a6c93c59, NAME => 'TestAcidGuarantees,,1732617291285.061adf5fb3bc2e9358b6d3d5a6c93c59.', STARTKEY => '', ENDKEY => ''}] 2024-11-26T10:35:21,676 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-26T10:35:21,676 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732617321676"}]},"ts":"9223372036854775807"} 2024-11-26T10:35:21,678 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-26T10:35:21,718 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=122, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-26T10:35:21,719 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 101 msec 2024-11-26T10:35:21,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-26T10:35:21,722 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 122 completed 2024-11-26T10:35:21,731 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=241 (was 239) - Thread LEAK? -, OpenFileDescriptor=457 (was 449) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=324 (was 317) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=5264 (was 5288) 2024-11-26T10:35:21,739 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=241, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=324, ProcessCount=11, AvailableMemoryMB=5264 2024-11-26T10:35:21,740 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-11-26T10:35:21,741 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-26T10:35:21,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=123, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-26T10:35:21,742 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=123, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-26T10:35:21,742 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:21,742 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 123 2024-11-26T10:35:21,743 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=123, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-26T10:35:21,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=123 2024-11-26T10:35:21,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742245_1421 (size=963) 2024-11-26T10:35:21,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=123 2024-11-26T10:35:22,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=123 2024-11-26T10:35:22,153 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1 2024-11-26T10:35:22,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742246_1422 (size=53) 2024-11-26T10:35:22,204 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:35:22,204 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 4d8f0b5b9c9359e0eccc71ea40315b28, disabling compactions & flushes 2024-11-26T10:35:22,204 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:22,204 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:22,204 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. after waiting 0 ms 2024-11-26T10:35:22,205 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:22,205 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:22,205 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 4d8f0b5b9c9359e0eccc71ea40315b28: 2024-11-26T10:35:22,206 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=123, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-26T10:35:22,207 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732617322207"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732617322207"}]},"ts":"1732617322207"} 2024-11-26T10:35:22,209 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
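The descriptor dumped above defines TestAcidGuarantees with three column families (A, B and C, one version each) plus the table-level metadata 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', which is what later makes each store come up as a CompactingMemStore with the adaptive in-memory compaction policy. A rough client-side equivalent against the standard HBase 2.x Admin API could look like the sketch below; it is illustrative only, not the code the test harness runs, and only the non-default settings from the dump are spelled out (BLOOMFILTER, BLOCKSIZE and the rest are column-family defaults).

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAdaptiveTableSketch {
        public static void main(String[] args) throws IOException {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                TableDescriptorBuilder table =
                    TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
                // Table-level metadata as shown in the create request; this is what
                // selects the ADAPTIVE policy for the CompactingMemStore.
                table.setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
                for (String family : new String[] {"A", "B", "C"}) {
                    table.setColumnFamily(ColumnFamilyDescriptorBuilder
                        .newBuilder(Bytes.toBytes(family))
                        .setMaxVersions(1)                                      // VERSIONS => '1'
                        .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE) // per-family equivalent
                        .build());
                }
                admin.createTable(table.build());
            }
        }
    }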
2024-11-26T10:35:22,210 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=123, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-26T10:35:22,210 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732617322210"}]},"ts":"1732617322210"} 2024-11-26T10:35:22,211 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-26T10:35:22,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=123 2024-11-26T10:35:22,376 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=124, ppid=123, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=4d8f0b5b9c9359e0eccc71ea40315b28, ASSIGN}] 2024-11-26T10:35:22,378 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=124, ppid=123, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=4d8f0b5b9c9359e0eccc71ea40315b28, ASSIGN 2024-11-26T10:35:22,380 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=124, ppid=123, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=4d8f0b5b9c9359e0eccc71ea40315b28, ASSIGN; state=OFFLINE, location=ccf62758a0a5,45419,1732617185877; forceNewPlan=false, retain=false 2024-11-26T10:35:22,531 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=124 updating hbase:meta row=4d8f0b5b9c9359e0eccc71ea40315b28, regionState=OPENING, regionLocation=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:22,533 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE; OpenRegionProcedure 4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877}] 2024-11-26T10:35:22,688 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:22,694 INFO [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 
2024-11-26T10:35:22,694 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegion(7285): Opening region: {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} 2024-11-26T10:35:22,695 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 4d8f0b5b9c9359e0eccc71ea40315b28 2024-11-26T10:35:22,695 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:35:22,696 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegion(7327): checking encryption for 4d8f0b5b9c9359e0eccc71ea40315b28 2024-11-26T10:35:22,696 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegion(7330): checking classloading for 4d8f0b5b9c9359e0eccc71ea40315b28 2024-11-26T10:35:22,699 INFO [StoreOpener-4d8f0b5b9c9359e0eccc71ea40315b28-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 4d8f0b5b9c9359e0eccc71ea40315b28 2024-11-26T10:35:22,701 INFO [StoreOpener-4d8f0b5b9c9359e0eccc71ea40315b28-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-26T10:35:22,701 INFO [StoreOpener-4d8f0b5b9c9359e0eccc71ea40315b28-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4d8f0b5b9c9359e0eccc71ea40315b28 columnFamilyName A 2024-11-26T10:35:22,701 DEBUG [StoreOpener-4d8f0b5b9c9359e0eccc71ea40315b28-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:22,702 INFO [StoreOpener-4d8f0b5b9c9359e0eccc71ea40315b28-1 {}] regionserver.HStore(327): Store=4d8f0b5b9c9359e0eccc71ea40315b28/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:35:22,702 INFO [StoreOpener-4d8f0b5b9c9359e0eccc71ea40315b28-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 4d8f0b5b9c9359e0eccc71ea40315b28 2024-11-26T10:35:22,704 INFO [StoreOpener-4d8f0b5b9c9359e0eccc71ea40315b28-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-26T10:35:22,704 INFO [StoreOpener-4d8f0b5b9c9359e0eccc71ea40315b28-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4d8f0b5b9c9359e0eccc71ea40315b28 columnFamilyName B 2024-11-26T10:35:22,704 DEBUG [StoreOpener-4d8f0b5b9c9359e0eccc71ea40315b28-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:22,705 INFO [StoreOpener-4d8f0b5b9c9359e0eccc71ea40315b28-1 {}] regionserver.HStore(327): Store=4d8f0b5b9c9359e0eccc71ea40315b28/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:35:22,705 INFO [StoreOpener-4d8f0b5b9c9359e0eccc71ea40315b28-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 4d8f0b5b9c9359e0eccc71ea40315b28 2024-11-26T10:35:22,707 INFO [StoreOpener-4d8f0b5b9c9359e0eccc71ea40315b28-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-26T10:35:22,707 INFO [StoreOpener-4d8f0b5b9c9359e0eccc71ea40315b28-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4d8f0b5b9c9359e0eccc71ea40315b28 columnFamilyName C 2024-11-26T10:35:22,707 DEBUG [StoreOpener-4d8f0b5b9c9359e0eccc71ea40315b28-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:22,708 INFO [StoreOpener-4d8f0b5b9c9359e0eccc71ea40315b28-1 {}] regionserver.HStore(327): Store=4d8f0b5b9c9359e0eccc71ea40315b28/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:35:22,708 INFO [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:22,709 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28 2024-11-26T10:35:22,709 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28 2024-11-26T10:35:22,711 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-26T10:35:22,712 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegion(1085): writing seq id for 4d8f0b5b9c9359e0eccc71ea40315b28 2024-11-26T10:35:22,714 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-26T10:35:22,714 INFO [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegion(1102): Opened 4d8f0b5b9c9359e0eccc71ea40315b28; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59676001, jitterRate=-0.11075828969478607}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-26T10:35:22,715 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegion(1001): Region open journal for 4d8f0b5b9c9359e0eccc71ea40315b28: 2024-11-26T10:35:22,716 INFO [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28., pid=125, masterSystemTime=1732617322687 2024-11-26T10:35:22,717 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:22,718 INFO [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 
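At this point OpenRegionProcedure pid=125 has opened the table's single region (empty start and end key) on ccf62758a0a5,45419. To see the same thing from the client side, a RegionLocator lookup along the lines of the generic sketch below (not part of the test) would list the region's encoded name and its hosting server:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class RegionLocationSketch {
        public static void main(String[] args) throws IOException {
            Configuration conf = HBaseConfiguration.create();
            TableName table = TableName.valueOf("TestAcidGuarantees");
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 RegionLocator locator = conn.getRegionLocator(table)) {
                // For this run the list should contain one region, hosted on the
                // server that just logged "Opened TestAcidGuarantees,...".
                for (HRegionLocation loc : locator.getAllRegionLocations()) {
                    System.out.println(loc.getRegion().getEncodedName() + " @ " + loc.getServerName());
                }
            }
        }
    }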
2024-11-26T10:35:22,718 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=124 updating hbase:meta row=4d8f0b5b9c9359e0eccc71ea40315b28, regionState=OPEN, openSeqNum=2, regionLocation=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:22,721 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=125, resume processing ppid=124 2024-11-26T10:35:22,721 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, ppid=124, state=SUCCESS; OpenRegionProcedure 4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 in 186 msec 2024-11-26T10:35:22,723 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=124, resume processing ppid=123 2024-11-26T10:35:22,723 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, ppid=123, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=4d8f0b5b9c9359e0eccc71ea40315b28, ASSIGN in 345 msec 2024-11-26T10:35:22,723 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=123, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-26T10:35:22,724 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732617322723"}]},"ts":"1732617322723"} 2024-11-26T10:35:22,725 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-26T10:35:22,769 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=123, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-26T10:35:22,772 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.0280 sec 2024-11-26T10:35:22,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=123 2024-11-26T10:35:22,849 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 123 completed 2024-11-26T10:35:22,852 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4c60eb7d to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@695c2253 2024-11-26T10:35:22,860 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63cefe40, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:35:22,862 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:35:22,864 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34778, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:35:22,865 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-26T10:35:22,866 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47940, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-26T10:35:22,868 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x79b10416 to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7177efc9 2024-11-26T10:35:22,876 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@65df2359, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:35:22,877 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2f142b04 to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@61d38088 2024-11-26T10:35:22,885 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7d0ab200, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:35:22,886 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0de9f076 to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7043f683 2024-11-26T10:35:22,894 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5871c039, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:35:22,895 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4414259d to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2b0c2472 2024-11-26T10:35:22,902 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7daa5922, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:35:22,903 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7ed69825 to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@34b30c39 2024-11-26T10:35:22,911 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b7f20c4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:35:22,912 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3d672ed2 to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5f7c40ba 2024-11-26T10:35:22,918 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2070263a, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:35:22,919 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7cf40102 to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@41b0e7b6 2024-11-26T10:35:22,927 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6050584c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:35:22,928 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x496fe03f to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@f2423f3 2024-11-26T10:35:22,935 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6dd48863, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:35:22,935 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3652e74d to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@184771cf 2024-11-26T10:35:22,943 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@51196534, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:35:22,944 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2405c04e to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@76f0408 2024-11-26T10:35:22,951 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1dc5e114, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:35:22,954 DEBUG [hconnection-0x244839e6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:35:22,954 DEBUG [hconnection-0x6f756c61-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:35:22,954 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-26T10:35:22,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=126, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees 2024-11-26T10:35:22,955 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34786, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
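From here on the test client asks the master to flush TestAcidGuarantees (FlushTableProcedure pid=126) while its writer threads keep issuing puts. The remainder of this section shows the two consequences: puts are rejected with RegionTooBusyException once the region crosses its blocking memstore limit (512.0 K in this run), and the dispatched FlushRegionCallable for pid=127 fails with "Unable to complete flush ... as already flushing" because the MemStoreFlusher is already snapshotting the stores, so the master simply re-dispatches the procedure. A minimal client-side sketch of the same flush request, with a defensive retry around a put (illustrative only; the stock HBase client already retries RegionTooBusyException internally, and while the row and column mirror the keys seen later in the flush output, the value literal is made up), might look like:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FlushAndRetrySketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            TableName name = TableName.valueOf("TestAcidGuarantees");
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin();
                 Table table = conn.getTable(name)) {
                admin.flush(name); // the request the master logs as "flush TestAcidGuarantees"

                Put put = new Put(Bytes.toBytes("test_row_0"))
                    .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                for (int attempt = 0; ; attempt++) {
                    try {
                        table.put(put);
                        break;
                    } catch (IOException e) {
                        // In this run the server rejects puts with RegionTooBusyException
                        // ("Over memstore limit=512.0 K"); back off briefly and try again.
                        if (attempt >= 10) throw e;
                        Thread.sleep(100L * (attempt + 1));
                    }
                }
            }
        }
    }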
2024-11-26T10:35:22,955 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34784, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:35:22,956 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=126, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-26T10:35:22,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-26T10:35:22,956 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=126, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-26T10:35:22,956 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-26T10:35:22,959 DEBUG [hconnection-0x593cba83-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:35:22,959 DEBUG [hconnection-0x5c6b073d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:35:22,959 DEBUG [hconnection-0x60012b37-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:35:22,959 DEBUG [hconnection-0x7c488079-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:35:22,959 DEBUG [hconnection-0x532c78fe-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:35:22,960 DEBUG [hconnection-0x7d496b83-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:35:22,960 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34798, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:35:22,960 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34810, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:35:22,960 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34812, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:35:22,960 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34828, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:35:22,960 DEBUG [hconnection-0x36a01d02-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:35:22,960 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34844, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:35:22,960 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34850, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), 
service=ClientService 2024-11-26T10:35:22,961 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34864, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:35:22,962 DEBUG [hconnection-0x2631425b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:35:22,963 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34880, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:35:22,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 4d8f0b5b9c9359e0eccc71ea40315b28 2024-11-26T10:35:22,966 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4d8f0b5b9c9359e0eccc71ea40315b28 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-26T10:35:22,967 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4d8f0b5b9c9359e0eccc71ea40315b28, store=A 2024-11-26T10:35:22,967 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:22,967 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4d8f0b5b9c9359e0eccc71ea40315b28, store=B 2024-11-26T10:35:22,967 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:22,967 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4d8f0b5b9c9359e0eccc71ea40315b28, store=C 2024-11-26T10:35:22,967 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:22,975 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:22,975 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:22,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34880 deadline: 1732617382974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:22,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34810 deadline: 1732617382975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:22,979 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:22,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1732617382975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:22,982 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:22,982 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:22,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34828 deadline: 1732617382976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:22,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34798 deadline: 1732617382976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:23,001 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/153fcf6dff8f433c90bd5e7ca8387ea3 is 50, key is test_row_0/A:col10/1732617322965/Put/seqid=0 2024-11-26T10:35:23,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742247_1423 (size=14341) 2024-11-26T10:35:23,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-26T10:35:23,078 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:23,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34810 deadline: 1732617383076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:23,078 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:23,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34880 deadline: 1732617383076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:23,082 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:23,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1732617383080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:23,084 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:23,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34828 deadline: 1732617383083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:23,086 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:23,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34798 deadline: 1732617383084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:23,107 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:23,108 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-26T10:35:23,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:23,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. as already flushing 2024-11-26T10:35:23,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:23,108 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:23,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:23,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:23,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-26T10:35:23,260 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:23,260 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-26T10:35:23,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:23,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. as already flushing 2024-11-26T10:35:23,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:23,260 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:23,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:23,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:23,280 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:23,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34880 deadline: 1732617383279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:23,283 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:23,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34810 deadline: 1732617383280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:23,284 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:23,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1732617383282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:23,287 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:23,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34828 deadline: 1732617383286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:23,288 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:23,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34798 deadline: 1732617383286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:23,412 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:23,412 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-26T10:35:23,412 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/153fcf6dff8f433c90bd5e7ca8387ea3 2024-11-26T10:35:23,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:23,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. as already flushing 2024-11-26T10:35:23,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 
2024-11-26T10:35:23,412 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:23,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:23,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:23,438 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/9289fa94d87e4c6d8549e0eda12dd9bc is 50, key is test_row_0/B:col10/1732617322965/Put/seqid=0 2024-11-26T10:35:23,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742248_1424 (size=12001) 2024-11-26T10:35:23,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-26T10:35:23,564 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:23,564 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-26T10:35:23,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:23,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. as already flushing 2024-11-26T10:35:23,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 
2024-11-26T10:35:23,565 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:23,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:23,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:23,586 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:23,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34880 deadline: 1732617383583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:23,586 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:23,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34810 deadline: 1732617383584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:23,586 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:23,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1732617383585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:23,589 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:23,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34828 deadline: 1732617383588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:23,593 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:23,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34798 deadline: 1732617383590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:23,716 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:23,716 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-26T10:35:23,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:23,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. as already flushing 2024-11-26T10:35:23,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:23,717 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:23,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:23,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:23,842 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/9289fa94d87e4c6d8549e0eda12dd9bc 2024-11-26T10:35:23,864 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/61c94bfcc0994e0180f22a89a7a1a5f2 is 50, key is test_row_0/C:col10/1732617322965/Put/seqid=0 2024-11-26T10:35:23,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742249_1425 (size=12001) 2024-11-26T10:35:23,868 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:23,868 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-26T10:35:23,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:23,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 
as already flushing 2024-11-26T10:35:23,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:23,869 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:23,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:23,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:24,020 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:24,021 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-26T10:35:24,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:24,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. as already flushing 2024-11-26T10:35:24,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:24,021 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:24,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:24,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:24,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-26T10:35:24,088 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:24,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34880 deadline: 1732617384087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:24,088 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:24,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34810 deadline: 1732617384087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:24,092 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:24,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1732617384090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:24,093 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:24,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34828 deadline: 1732617384091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:24,100 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:24,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34798 deadline: 1732617384098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:24,173 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:24,173 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-26T10:35:24,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:24,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. as already flushing 2024-11-26T10:35:24,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:24,173 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:24,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:24,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:24,268 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/61c94bfcc0994e0180f22a89a7a1a5f2 2024-11-26T10:35:24,271 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/153fcf6dff8f433c90bd5e7ca8387ea3 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/153fcf6dff8f433c90bd5e7ca8387ea3 2024-11-26T10:35:24,273 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/153fcf6dff8f433c90bd5e7ca8387ea3, entries=200, sequenceid=15, filesize=14.0 K 2024-11-26T10:35:24,274 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/9289fa94d87e4c6d8549e0eda12dd9bc as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/9289fa94d87e4c6d8549e0eda12dd9bc 2024-11-26T10:35:24,277 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/9289fa94d87e4c6d8549e0eda12dd9bc, entries=150, sequenceid=15, 
filesize=11.7 K 2024-11-26T10:35:24,277 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/61c94bfcc0994e0180f22a89a7a1a5f2 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/61c94bfcc0994e0180f22a89a7a1a5f2 2024-11-26T10:35:24,280 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/61c94bfcc0994e0180f22a89a7a1a5f2, entries=150, sequenceid=15, filesize=11.7 K 2024-11-26T10:35:24,280 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=140.89 KB/144270 for 4d8f0b5b9c9359e0eccc71ea40315b28 in 1314ms, sequenceid=15, compaction requested=false 2024-11-26T10:35:24,281 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-26T10:35:24,281 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4d8f0b5b9c9359e0eccc71ea40315b28: 2024-11-26T10:35:24,325 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:24,325 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-26T10:35:24,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 
2024-11-26T10:35:24,325 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2837): Flushing 4d8f0b5b9c9359e0eccc71ea40315b28 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-26T10:35:24,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4d8f0b5b9c9359e0eccc71ea40315b28, store=A 2024-11-26T10:35:24,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:24,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4d8f0b5b9c9359e0eccc71ea40315b28, store=B 2024-11-26T10:35:24,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:24,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4d8f0b5b9c9359e0eccc71ea40315b28, store=C 2024-11-26T10:35:24,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:24,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/fcc37984f0c94c45ade74a22ef53ab55 is 50, key is test_row_0/A:col10/1732617322974/Put/seqid=0 2024-11-26T10:35:24,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742250_1426 (size=12001) 2024-11-26T10:35:24,734 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/fcc37984f0c94c45ade74a22ef53ab55 2024-11-26T10:35:24,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/6f9ac860af7d4e13b10d51bdcb7de0e3 is 50, key is test_row_0/B:col10/1732617322974/Put/seqid=0 2024-11-26T10:35:24,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742251_1427 (size=12001) 2024-11-26T10:35:25,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-26T10:35:25,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 4d8f0b5b9c9359e0eccc71ea40315b28 2024-11-26T10:35:25,093 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. as already flushing 2024-11-26T10:35:25,110 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:25,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34810 deadline: 1732617385104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:25,110 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:25,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1732617385105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:25,114 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:25,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34880 deadline: 1732617385108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:25,115 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:25,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34798 deadline: 1732617385109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:25,115 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:25,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34828 deadline: 1732617385110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:25,143 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/6f9ac860af7d4e13b10d51bdcb7de0e3 2024-11-26T10:35:25,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/b886fffbb2794f6baaf75615fb626473 is 50, key is test_row_0/C:col10/1732617322974/Put/seqid=0 2024-11-26T10:35:25,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742252_1428 (size=12001) 2024-11-26T10:35:25,212 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:25,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34810 deadline: 1732617385211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:25,213 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:25,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1732617385211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:25,218 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:25,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34880 deadline: 1732617385215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:25,218 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:25,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34828 deadline: 1732617385215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:25,265 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-26T10:35:25,418 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:25,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34810 deadline: 1732617385414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:25,418 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:25,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1732617385415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:25,422 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:25,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34880 deadline: 1732617385419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:25,422 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:25,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34828 deadline: 1732617385420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:25,551 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/b886fffbb2794f6baaf75615fb626473 2024-11-26T10:35:25,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/fcc37984f0c94c45ade74a22ef53ab55 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/fcc37984f0c94c45ade74a22ef53ab55 2024-11-26T10:35:25,557 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/fcc37984f0c94c45ade74a22ef53ab55, entries=150, sequenceid=39, filesize=11.7 K 2024-11-26T10:35:25,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/6f9ac860af7d4e13b10d51bdcb7de0e3 as 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/6f9ac860af7d4e13b10d51bdcb7de0e3 2024-11-26T10:35:25,561 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/6f9ac860af7d4e13b10d51bdcb7de0e3, entries=150, sequenceid=39, filesize=11.7 K 2024-11-26T10:35:25,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/b886fffbb2794f6baaf75615fb626473 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/b886fffbb2794f6baaf75615fb626473 2024-11-26T10:35:25,564 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/b886fffbb2794f6baaf75615fb626473, entries=150, sequenceid=39, filesize=11.7 K 2024-11-26T10:35:25,565 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 4d8f0b5b9c9359e0eccc71ea40315b28 in 1239ms, sequenceid=39, compaction requested=false 2024-11-26T10:35:25,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2538): Flush status journal for 4d8f0b5b9c9359e0eccc71ea40315b28: 2024-11-26T10:35:25,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 
2024-11-26T10:35:25,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=127 2024-11-26T10:35:25,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4106): Remote procedure done, pid=127 2024-11-26T10:35:25,566 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=127, resume processing ppid=126 2024-11-26T10:35:25,566 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, ppid=126, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6090 sec 2024-11-26T10:35:25,567 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees in 2.6130 sec 2024-11-26T10:35:25,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 4d8f0b5b9c9359e0eccc71ea40315b28 2024-11-26T10:35:25,721 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4d8f0b5b9c9359e0eccc71ea40315b28 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-26T10:35:25,722 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4d8f0b5b9c9359e0eccc71ea40315b28, store=A 2024-11-26T10:35:25,722 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:25,722 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4d8f0b5b9c9359e0eccc71ea40315b28, store=B 2024-11-26T10:35:25,722 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:25,722 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4d8f0b5b9c9359e0eccc71ea40315b28, store=C 2024-11-26T10:35:25,722 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:25,726 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/dbd09fae26464be6ac476274fa248e4a is 50, key is test_row_0/A:col10/1732617325092/Put/seqid=0 2024-11-26T10:35:25,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742253_1429 (size=14341) 2024-11-26T10:35:25,752 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:25,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34828 deadline: 1732617385747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:25,752 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:25,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34810 deadline: 1732617385748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:25,754 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:25,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1732617385748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:25,754 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:25,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34880 deadline: 1732617385752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:25,855 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:25,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34810 deadline: 1732617385853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:25,855 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:25,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34828 deadline: 1732617385853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:25,858 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:25,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1732617385854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:25,858 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:25,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34880 deadline: 1732617385854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:26,057 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:26,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34828 deadline: 1732617386056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:26,061 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:26,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34810 deadline: 1732617386057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:26,062 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:26,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1732617386058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:26,062 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:26,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34880 deadline: 1732617386058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:26,129 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/dbd09fae26464be6ac476274fa248e4a 2024-11-26T10:35:26,135 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/31b928f44d2740ab993002d534b21a9c is 50, key is test_row_0/B:col10/1732617325092/Put/seqid=0 2024-11-26T10:35:26,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742254_1430 (size=12001) 2024-11-26T10:35:26,138 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/31b928f44d2740ab993002d534b21a9c 2024-11-26T10:35:26,144 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/f47a26c67f6b4107af4d5b33a334abd8 is 50, key is test_row_0/C:col10/1732617325092/Put/seqid=0 2024-11-26T10:35:26,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742255_1431 (size=12001) 2024-11-26T10:35:26,360 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:26,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34828 deadline: 1732617386359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:26,366 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:26,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34810 deadline: 1732617386364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:26,366 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:26,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1732617386364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:26,366 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:26,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34880 deadline: 1732617386364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:26,547 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/f47a26c67f6b4107af4d5b33a334abd8 2024-11-26T10:35:26,550 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/dbd09fae26464be6ac476274fa248e4a as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/dbd09fae26464be6ac476274fa248e4a 2024-11-26T10:35:26,553 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/dbd09fae26464be6ac476274fa248e4a, entries=200, sequenceid=53, filesize=14.0 K 2024-11-26T10:35:26,553 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/31b928f44d2740ab993002d534b21a9c as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/31b928f44d2740ab993002d534b21a9c 2024-11-26T10:35:26,556 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/31b928f44d2740ab993002d534b21a9c, entries=150, sequenceid=53, filesize=11.7 K 2024-11-26T10:35:26,557 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/f47a26c67f6b4107af4d5b33a334abd8 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/f47a26c67f6b4107af4d5b33a334abd8 2024-11-26T10:35:26,559 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/f47a26c67f6b4107af4d5b33a334abd8, entries=150, sequenceid=53, filesize=11.7 K 2024-11-26T10:35:26,560 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 4d8f0b5b9c9359e0eccc71ea40315b28 in 839ms, sequenceid=53, compaction requested=true 2024-11-26T10:35:26,560 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4d8f0b5b9c9359e0eccc71ea40315b28: 2024-11-26T10:35:26,560 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4d8f0b5b9c9359e0eccc71ea40315b28:A, priority=-2147483648, current under compaction store size is 1 2024-11-26T10:35:26,560 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:35:26,560 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:35:26,560 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4d8f0b5b9c9359e0eccc71ea40315b28:B, priority=-2147483648, current under compaction store size is 2 2024-11-26T10:35:26,560 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:35:26,560 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:35:26,560 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4d8f0b5b9c9359e0eccc71ea40315b28:C, priority=-2147483648, current under compaction store size is 3 2024-11-26T10:35:26,560 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:35:26,561 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:35:26,561 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40683 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:35:26,561 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): 4d8f0b5b9c9359e0eccc71ea40315b28/A is initiating minor compaction (all files) 2024-11-26T10:35:26,561 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): 4d8f0b5b9c9359e0eccc71ea40315b28/B is initiating minor compaction (all files) 2024-11-26T10:35:26,561 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4d8f0b5b9c9359e0eccc71ea40315b28/A in TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 
2024-11-26T10:35:26,561 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4d8f0b5b9c9359e0eccc71ea40315b28/B in TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:26,561 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/9289fa94d87e4c6d8549e0eda12dd9bc, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/6f9ac860af7d4e13b10d51bdcb7de0e3, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/31b928f44d2740ab993002d534b21a9c] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp, totalSize=35.2 K 2024-11-26T10:35:26,561 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/153fcf6dff8f433c90bd5e7ca8387ea3, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/fcc37984f0c94c45ade74a22ef53ab55, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/dbd09fae26464be6ac476274fa248e4a] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp, totalSize=39.7 K 2024-11-26T10:35:26,561 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 9289fa94d87e4c6d8549e0eda12dd9bc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732617322965 2024-11-26T10:35:26,561 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 153fcf6dff8f433c90bd5e7ca8387ea3, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732617322963 2024-11-26T10:35:26,561 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 6f9ac860af7d4e13b10d51bdcb7de0e3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1732617322972 2024-11-26T10:35:26,561 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting fcc37984f0c94c45ade74a22ef53ab55, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1732617322972 2024-11-26T10:35:26,561 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 31b928f44d2740ab993002d534b21a9c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732617325092 2024-11-26T10:35:26,561 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting dbd09fae26464be6ac476274fa248e4a, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732617325092 
2024-11-26T10:35:26,566 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4d8f0b5b9c9359e0eccc71ea40315b28#B#compaction#362 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:35:26,566 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4d8f0b5b9c9359e0eccc71ea40315b28#A#compaction#363 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:35:26,567 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/7ad21eba13454eb297af499091df7b98 is 50, key is test_row_0/B:col10/1732617325092/Put/seqid=0 2024-11-26T10:35:26,567 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/78558302b45a444fbe06360463c74c03 is 50, key is test_row_0/A:col10/1732617325092/Put/seqid=0 2024-11-26T10:35:26,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742256_1432 (size=12104) 2024-11-26T10:35:26,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742257_1433 (size=12104) 2024-11-26T10:35:26,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 4d8f0b5b9c9359e0eccc71ea40315b28 2024-11-26T10:35:26,866 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4d8f0b5b9c9359e0eccc71ea40315b28 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-26T10:35:26,866 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4d8f0b5b9c9359e0eccc71ea40315b28, store=A 2024-11-26T10:35:26,867 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:26,867 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4d8f0b5b9c9359e0eccc71ea40315b28, store=B 2024-11-26T10:35:26,867 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:26,867 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4d8f0b5b9c9359e0eccc71ea40315b28, store=C 2024-11-26T10:35:26,867 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:26,890 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:26,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34880 deadline: 1732617386882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:26,892 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:26,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1732617386885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:26,893 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:26,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34828 deadline: 1732617386887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:26,894 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:26,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34810 deadline: 1732617386887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:26,895 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/1bfc8759d24e4d559a274f116736a3a6 is 50, key is test_row_0/A:col10/1732617325751/Put/seqid=0 2024-11-26T10:35:26,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742258_1434 (size=14341) 2024-11-26T10:35:26,979 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/7ad21eba13454eb297af499091df7b98 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/7ad21eba13454eb297af499091df7b98 2024-11-26T10:35:26,979 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/78558302b45a444fbe06360463c74c03 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/78558302b45a444fbe06360463c74c03 2024-11-26T10:35:26,983 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4d8f0b5b9c9359e0eccc71ea40315b28/B of 4d8f0b5b9c9359e0eccc71ea40315b28 into 7ad21eba13454eb297af499091df7b98(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:35:26,983 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4d8f0b5b9c9359e0eccc71ea40315b28/A of 4d8f0b5b9c9359e0eccc71ea40315b28 into 78558302b45a444fbe06360463c74c03(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-26T10:35:26,983 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4d8f0b5b9c9359e0eccc71ea40315b28: 2024-11-26T10:35:26,983 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4d8f0b5b9c9359e0eccc71ea40315b28: 2024-11-26T10:35:26,983 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28., storeName=4d8f0b5b9c9359e0eccc71ea40315b28/B, priority=13, startTime=1732617326560; duration=0sec 2024-11-26T10:35:26,983 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28., storeName=4d8f0b5b9c9359e0eccc71ea40315b28/A, priority=13, startTime=1732617326560; duration=0sec 2024-11-26T10:35:26,983 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:35:26,983 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4d8f0b5b9c9359e0eccc71ea40315b28:B 2024-11-26T10:35:26,983 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:35:26,983 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4d8f0b5b9c9359e0eccc71ea40315b28:A 2024-11-26T10:35:26,983 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:35:26,984 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:35:26,984 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): 4d8f0b5b9c9359e0eccc71ea40315b28/C is initiating minor compaction (all files) 2024-11-26T10:35:26,984 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4d8f0b5b9c9359e0eccc71ea40315b28/C in TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 
2024-11-26T10:35:26,984 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/61c94bfcc0994e0180f22a89a7a1a5f2, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/b886fffbb2794f6baaf75615fb626473, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/f47a26c67f6b4107af4d5b33a334abd8] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp, totalSize=35.2 K 2024-11-26T10:35:26,984 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 61c94bfcc0994e0180f22a89a7a1a5f2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732617322965 2024-11-26T10:35:26,984 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting b886fffbb2794f6baaf75615fb626473, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1732617322972 2024-11-26T10:35:26,985 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting f47a26c67f6b4107af4d5b33a334abd8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732617325092 2024-11-26T10:35:26,989 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4d8f0b5b9c9359e0eccc71ea40315b28#C#compaction#365 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:35:26,990 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/b51a2a1baa224d3888e3993faefc3290 is 50, key is test_row_0/C:col10/1732617325092/Put/seqid=0 2024-11-26T10:35:26,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742259_1435 (size=12104) 2024-11-26T10:35:26,994 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:26,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34880 deadline: 1732617386991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:26,999 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:26,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1732617386993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:26,999 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:26,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34828 deadline: 1732617386994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:27,001 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:27,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34810 deadline: 1732617386995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:27,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-26T10:35:27,060 INFO [Thread-1913 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 126 completed 2024-11-26T10:35:27,061 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-26T10:35:27,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees 2024-11-26T10:35:27,062 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-26T10:35:27,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-26T10:35:27,062 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-26T10:35:27,062 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=129, ppid=128, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-26T10:35:27,119 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:27,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34798 deadline: 1732617387115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:27,120 DEBUG [Thread-1907 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4145 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28., hostname=ccf62758a0a5,45419,1732617185877, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) 
at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-26T10:35:27,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-26T10:35:27,200 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:27,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34880 deadline: 1732617387196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:27,202 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:27,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34828 deadline: 1732617387200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:27,204 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:27,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1732617387200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:27,204 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:27,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34810 deadline: 1732617387202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:27,213 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:27,214 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-26T10:35:27,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:27,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. as already flushing 2024-11-26T10:35:27,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:27,214 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
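
[Annotation, not part of the captured log] The RegionTooBusyException rejections and the RpcRetryingCallerImpl entry above (tries=6, retries=16, row 'test_row_0' on table 'TestAcidGuarantees') come from writer threads issuing single-row puts while the region's memstore is over its blocking limit. As a rough illustration only, and not taken from the test source, a client-side put of that shape looks like the sketch below; the table name and row key match the log, while the column family, qualifier, value, and retry setting are assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutRetrySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Client retry budget; retries=16 appears in the RpcRetryingCallerImpl entry above.
    conf.setInt("hbase.client.retries.number", 16);
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      // Family/qualifier/value are placeholders for illustration only.
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // RegionTooBusyException is retried inside the client with backoff; it only
      // surfaces to the caller once the retry budget is exhausted.
      table.put(put);
    }
  }
}
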
2024-11-26T10:35:27,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:27,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:27,298 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/1bfc8759d24e4d559a274f116736a3a6 2024-11-26T10:35:27,303 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/16662a7059624af584a451b1baa2ea41 is 50, key is test_row_0/B:col10/1732617325751/Put/seqid=0 2024-11-26T10:35:27,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742260_1436 (size=12001) 2024-11-26T10:35:27,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-26T10:35:27,365 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:27,366 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-26T10:35:27,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:27,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. as already flushing 2024-11-26T10:35:27,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:27,366 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:27,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:27,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:27,396 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/b51a2a1baa224d3888e3993faefc3290 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/b51a2a1baa224d3888e3993faefc3290 2024-11-26T10:35:27,400 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4d8f0b5b9c9359e0eccc71ea40315b28/C of 4d8f0b5b9c9359e0eccc71ea40315b28 into b51a2a1baa224d3888e3993faefc3290(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:35:27,400 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4d8f0b5b9c9359e0eccc71ea40315b28: 2024-11-26T10:35:27,400 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28., storeName=4d8f0b5b9c9359e0eccc71ea40315b28/C, priority=13, startTime=1732617326560; duration=0sec 2024-11-26T10:35:27,400 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:35:27,400 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4d8f0b5b9c9359e0eccc71ea40315b28:C 2024-11-26T10:35:27,503 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:27,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34880 deadline: 1732617387500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:27,505 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:27,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34828 deadline: 1732617387503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:27,507 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:27,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34810 deadline: 1732617387505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:27,508 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:27,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1732617387506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:27,518 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:27,518 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-26T10:35:27,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:27,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. as already flushing 2024-11-26T10:35:27,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:27,518 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
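
[Annotation, not part of the captured log] The repeated "Over memstore limit=512.0 K" warnings are thrown from HRegion.checkResources, which blocks writes once a region's memstore grows past its blocking threshold, i.e. the configured flush size times the block multiplier. The values in the sketch below are assumptions chosen only so the product reproduces the 512 K limit seen here; they are not read from the test configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Blocking threshold = flush size * block multiplier.
    // Assumed values: 128 KB * 4 = 512 KB, matching "Over memstore limit=512.0 K" above.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
    System.out.println("Blocking limit (bytes): " + blockingLimit); // 524288
  }
}
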
2024-11-26T10:35:27,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:27,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:27,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-26T10:35:27,670 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:27,670 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-26T10:35:27,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:27,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. as already flushing 2024-11-26T10:35:27,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:27,671 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:27,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:27,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
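
[Annotation, not part of the captured log] The pid=128/pid=129 entries above are a master-side FlushTableProcedure and its per-region FlushRegionCallable, which the master keeps re-dispatching while the region server answers "NOT flushing ... as already flushing" and reports the flush as failed. A flush request of this kind is typically issued through the Admin API; the sketch below is illustrative only, with the table name taken from the log and everything else assumed.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Requests a flush of every region of the table; on the master this shows up
      // as a FlushTableProcedure with FlushRegionProcedure subprocedures, like
      // pid=128 and pid=129 in the entries above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
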
2024-11-26T10:35:27,706 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/16662a7059624af584a451b1baa2ea41 2024-11-26T10:35:27,713 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/0c9d0deaed6b4d869bb93ab55a299b7d is 50, key is test_row_0/C:col10/1732617325751/Put/seqid=0 2024-11-26T10:35:27,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742261_1437 (size=12001) 2024-11-26T10:35:27,822 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:27,823 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-26T10:35:27,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:27,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. as already flushing 2024-11-26T10:35:27,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:27,823 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:35:27,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:27,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:27,975 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:27,975 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-26T10:35:27,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:27,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. as already flushing 2024-11-26T10:35:27,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:27,975 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:27,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:27,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:28,005 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:28,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34880 deadline: 1732617388004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:28,009 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:28,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34828 deadline: 1732617388008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:28,010 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:28,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34810 deadline: 1732617388009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:28,013 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:28,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1732617388012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:28,116 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/0c9d0deaed6b4d869bb93ab55a299b7d 2024-11-26T10:35:28,119 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/1bfc8759d24e4d559a274f116736a3a6 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/1bfc8759d24e4d559a274f116736a3a6 2024-11-26T10:35:28,122 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/1bfc8759d24e4d559a274f116736a3a6, entries=200, sequenceid=76, filesize=14.0 K 2024-11-26T10:35:28,122 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/16662a7059624af584a451b1baa2ea41 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/16662a7059624af584a451b1baa2ea41 2024-11-26T10:35:28,125 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/16662a7059624af584a451b1baa2ea41, entries=150, sequenceid=76, filesize=11.7 K 2024-11-26T10:35:28,125 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/0c9d0deaed6b4d869bb93ab55a299b7d as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/0c9d0deaed6b4d869bb93ab55a299b7d 2024-11-26T10:35:28,127 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 
2024-11-26T10:35:28,127 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-26T10:35:28,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:28,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. as already flushing 2024-11-26T10:35:28,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:28,127 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:28,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:35:28,128 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/0c9d0deaed6b4d869bb93ab55a299b7d, entries=150, sequenceid=76, filesize=11.7 K 2024-11-26T10:35:28,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:35:28,128 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 4d8f0b5b9c9359e0eccc71ea40315b28 in 1262ms, sequenceid=76, compaction requested=false 2024-11-26T10:35:28,128 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4d8f0b5b9c9359e0eccc71ea40315b28: 2024-11-26T10:35:28,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-26T10:35:28,279 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:28,279 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-26T10:35:28,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:28,279 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2837): Flushing 4d8f0b5b9c9359e0eccc71ea40315b28 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-26T10:35:28,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4d8f0b5b9c9359e0eccc71ea40315b28, store=A 2024-11-26T10:35:28,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:28,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4d8f0b5b9c9359e0eccc71ea40315b28, store=B 2024-11-26T10:35:28,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:28,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4d8f0b5b9c9359e0eccc71ea40315b28, store=C 2024-11-26T10:35:28,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:28,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/0f6c41413bff42669900425c9a9e3192 is 50, key is test_row_0/A:col10/1732617326884/Put/seqid=0 2024-11-26T10:35:28,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742262_1438 (size=12001) 2024-11-26T10:35:28,289 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at 
sequenceid=92 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/0f6c41413bff42669900425c9a9e3192 2024-11-26T10:35:28,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/425d2ec3e4c74a8480e9a70b02e6d599 is 50, key is test_row_0/B:col10/1732617326884/Put/seqid=0 2024-11-26T10:35:28,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742263_1439 (size=12001) 2024-11-26T10:35:28,705 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/425d2ec3e4c74a8480e9a70b02e6d599 2024-11-26T10:35:28,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/b98fbb3930b748099e5c17cd3b77e1d9 is 50, key is test_row_0/C:col10/1732617326884/Put/seqid=0 2024-11-26T10:35:28,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742264_1440 (size=12001) 2024-11-26T10:35:28,713 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/b98fbb3930b748099e5c17cd3b77e1d9 2024-11-26T10:35:28,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/0f6c41413bff42669900425c9a9e3192 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/0f6c41413bff42669900425c9a9e3192 2024-11-26T10:35:28,718 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/0f6c41413bff42669900425c9a9e3192, entries=150, sequenceid=92, filesize=11.7 K 2024-11-26T10:35:28,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/425d2ec3e4c74a8480e9a70b02e6d599 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/425d2ec3e4c74a8480e9a70b02e6d599 2024-11-26T10:35:28,722 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/425d2ec3e4c74a8480e9a70b02e6d599, entries=150, sequenceid=92, filesize=11.7 K 2024-11-26T10:35:28,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/b98fbb3930b748099e5c17cd3b77e1d9 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/b98fbb3930b748099e5c17cd3b77e1d9 2024-11-26T10:35:28,725 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/b98fbb3930b748099e5c17cd3b77e1d9, entries=150, sequenceid=92, filesize=11.7 K 2024-11-26T10:35:28,726 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=0 B/0 for 4d8f0b5b9c9359e0eccc71ea40315b28 in 447ms, sequenceid=92, compaction requested=true 2024-11-26T10:35:28,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2538): Flush status journal for 4d8f0b5b9c9359e0eccc71ea40315b28: 2024-11-26T10:35:28,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 
2024-11-26T10:35:28,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=129 2024-11-26T10:35:28,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4106): Remote procedure done, pid=129 2024-11-26T10:35:28,727 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=129, resume processing ppid=128 2024-11-26T10:35:28,727 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, ppid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6650 sec 2024-11-26T10:35:28,728 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees in 1.6670 sec 2024-11-26T10:35:29,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 4d8f0b5b9c9359e0eccc71ea40315b28 2024-11-26T10:35:29,023 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4d8f0b5b9c9359e0eccc71ea40315b28 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-26T10:35:29,023 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4d8f0b5b9c9359e0eccc71ea40315b28, store=A 2024-11-26T10:35:29,023 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:29,023 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4d8f0b5b9c9359e0eccc71ea40315b28, store=B 2024-11-26T10:35:29,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:29,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4d8f0b5b9c9359e0eccc71ea40315b28, store=C 2024-11-26T10:35:29,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:29,027 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/6f310621b4d0417ba3bae26affb1121c is 50, key is test_row_0/A:col10/1732617329022/Put/seqid=0 2024-11-26T10:35:29,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742265_1441 (size=16681) 2024-11-26T10:35:29,064 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:29,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34810 deadline: 1732617389057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:29,064 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:29,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34880 deadline: 1732617389058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:29,065 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:29,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1732617389059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:29,065 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:29,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34828 deadline: 1732617389061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:29,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-26T10:35:29,165 INFO [Thread-1913 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 128 completed 2024-11-26T10:35:29,166 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-26T10:35:29,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees 2024-11-26T10:35:29,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-26T10:35:29,167 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-26T10:35:29,167 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-26T10:35:29,167 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=131, ppid=130, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-26T10:35:29,168 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:29,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34810 deadline: 1732617389165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:29,168 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:29,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34880 deadline: 1732617389165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:29,168 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:29,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1732617389165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:29,169 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:29,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34828 deadline: 1732617389166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:29,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-26T10:35:29,319 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:29,319 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-26T10:35:29,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:29,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. as already flushing 2024-11-26T10:35:29,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:29,319 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:35:29,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:29,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:29,372 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:29,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34810 deadline: 1732617389369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:29,373 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:29,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34828 deadline: 1732617389369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:29,373 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:29,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34880 deadline: 1732617389369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:29,373 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:29,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1732617389369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:29,431 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=103 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/6f310621b4d0417ba3bae26affb1121c 2024-11-26T10:35:29,436 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/6fce3b54059c4b1089e4b30c41e941e7 is 50, key is test_row_0/B:col10/1732617329022/Put/seqid=0 2024-11-26T10:35:29,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742266_1442 (size=12001) 2024-11-26T10:35:29,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-26T10:35:29,471 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:29,471 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-26T10:35:29,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:29,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 
as already flushing 2024-11-26T10:35:29,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:29,472 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:29,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:29,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:29,623 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:29,623 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-26T10:35:29,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:29,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. as already flushing 2024-11-26T10:35:29,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:29,624 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:29,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:29,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:29,677 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:29,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34810 deadline: 1732617389674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:29,677 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:29,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34828 deadline: 1732617389674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:29,679 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:29,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1732617389674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:29,679 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:29,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34880 deadline: 1732617389676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:29,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-26T10:35:29,775 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:29,776 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-26T10:35:29,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:29,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. as already flushing 2024-11-26T10:35:29,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:29,776 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:35:29,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:29,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:29,840 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=103 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/6fce3b54059c4b1089e4b30c41e941e7 2024-11-26T10:35:29,846 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/e34137eabcf347dabfa923e980585b3b is 50, key is test_row_0/C:col10/1732617329022/Put/seqid=0 2024-11-26T10:35:29,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742267_1443 (size=12001) 2024-11-26T10:35:29,927 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:29,928 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-26T10:35:29,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:29,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. as already flushing 2024-11-26T10:35:29,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:29,928 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:35:29,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:29,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:30,079 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:30,080 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-26T10:35:30,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:30,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. as already flushing 2024-11-26T10:35:30,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:30,080 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:30,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:30,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:30,185 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:30,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34810 deadline: 1732617390182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:30,185 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:30,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34828 deadline: 1732617390182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:30,185 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:30,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1732617390183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:30,185 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:30,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34880 deadline: 1732617390184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:30,232 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:30,232 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-26T10:35:30,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:30,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. as already flushing 2024-11-26T10:35:30,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:30,232 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:35:30,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:30,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-26T10:35:30,250 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=103 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/e34137eabcf347dabfa923e980585b3b
2024-11-26T10:35:30,253 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/6f310621b4d0417ba3bae26affb1121c as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/6f310621b4d0417ba3bae26affb1121c
2024-11-26T10:35:30,256 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/6f310621b4d0417ba3bae26affb1121c, entries=250, sequenceid=103, filesize=16.3 K
2024-11-26T10:35:30,257 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/6fce3b54059c4b1089e4b30c41e941e7 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/6fce3b54059c4b1089e4b30c41e941e7
2024-11-26T10:35:30,260 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/6fce3b54059c4b1089e4b30c41e941e7, entries=150, sequenceid=103, filesize=11.7 K
2024-11-26T10:35:30,261 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/e34137eabcf347dabfa923e980585b3b as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/e34137eabcf347dabfa923e980585b3b
2024-11-26T10:35:30,263 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/e34137eabcf347dabfa923e980585b3b, entries=150, sequenceid=103, filesize=11.7 K
2024-11-26T10:35:30,264 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 4d8f0b5b9c9359e0eccc71ea40315b28 in 1241ms, sequenceid=103, compaction requested=true
2024-11-26T10:35:30,264 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4d8f0b5b9c9359e0eccc71ea40315b28:
2024-11-26T10:35:30,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4d8f0b5b9c9359e0eccc71ea40315b28:A, priority=-2147483648, current under compaction store size is 1
2024-11-26T10:35:30,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-26T10:35:30,264 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking
2024-11-26T10:35:30,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4d8f0b5b9c9359e0eccc71ea40315b28:B, priority=-2147483648, current under compaction store size is 2
2024-11-26T10:35:30,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-26T10:35:30,264 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking
2024-11-26T10:35:30,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4d8f0b5b9c9359e0eccc71ea40315b28:C, priority=-2147483648, current under compaction store size is 3
2024-11-26T10:35:30,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-26T10:35:30,265 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 55127 starting at candidate #0 after considering 3 permutations with 3 in ratio
2024-11-26T10:35:30,265 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio
2024-11-26T10:35:30,265 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): 4d8f0b5b9c9359e0eccc71ea40315b28/B is initiating minor compaction (all files)
2024-11-26T10:35:30,265 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): 4d8f0b5b9c9359e0eccc71ea40315b28/A is initiating minor compaction (all files)
2024-11-26T10:35:30,265 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4d8f0b5b9c9359e0eccc71ea40315b28/B in TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.
2024-11-26T10:35:30,265 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4d8f0b5b9c9359e0eccc71ea40315b28/A in TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.
2024-11-26T10:35:30,265 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/7ad21eba13454eb297af499091df7b98, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/16662a7059624af584a451b1baa2ea41, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/425d2ec3e4c74a8480e9a70b02e6d599, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/6fce3b54059c4b1089e4b30c41e941e7] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp, totalSize=47.0 K 2024-11-26T10:35:30,265 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/78558302b45a444fbe06360463c74c03, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/1bfc8759d24e4d559a274f116736a3a6, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/0f6c41413bff42669900425c9a9e3192, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/6f310621b4d0417ba3bae26affb1121c] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp, totalSize=53.8 K 2024-11-26T10:35:30,265 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 78558302b45a444fbe06360463c74c03, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732617325092 2024-11-26T10:35:30,265 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 7ad21eba13454eb297af499091df7b98, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732617325092 2024-11-26T10:35:30,266 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1bfc8759d24e4d559a274f116736a3a6, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1732617325742 2024-11-26T10:35:30,266 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 16662a7059624af584a451b1baa2ea41, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1732617325748 2024-11-26T10:35:30,266 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0f6c41413bff42669900425c9a9e3192, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732617326875 2024-11-26T10:35:30,266 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 
425d2ec3e4c74a8480e9a70b02e6d599, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732617326875 2024-11-26T10:35:30,266 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6f310621b4d0417ba3bae26affb1121c, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=103, earliestPutTs=1732617329017 2024-11-26T10:35:30,266 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 6fce3b54059c4b1089e4b30c41e941e7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=103, earliestPutTs=1732617329020 2024-11-26T10:35:30,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-26T10:35:30,272 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4d8f0b5b9c9359e0eccc71ea40315b28#B#compaction#374 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:35:30,273 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/146a858013fa46f7bbe9407a76f0d094 is 50, key is test_row_0/B:col10/1732617329022/Put/seqid=0 2024-11-26T10:35:30,273 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4d8f0b5b9c9359e0eccc71ea40315b28#A#compaction#375 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:35:30,273 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/863ba5a171fc4301b054e439e749f093 is 50, key is test_row_0/A:col10/1732617329022/Put/seqid=0 2024-11-26T10:35:30,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742268_1444 (size=12241) 2024-11-26T10:35:30,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742269_1445 (size=12241) 2024-11-26T10:35:30,384 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:30,384 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-26T10:35:30,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 
2024-11-26T10:35:30,385 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2837): Flushing 4d8f0b5b9c9359e0eccc71ea40315b28 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-26T10:35:30,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4d8f0b5b9c9359e0eccc71ea40315b28, store=A 2024-11-26T10:35:30,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:30,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4d8f0b5b9c9359e0eccc71ea40315b28, store=B 2024-11-26T10:35:30,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:30,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4d8f0b5b9c9359e0eccc71ea40315b28, store=C 2024-11-26T10:35:30,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:30,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/5df7a51c904b409bb992bcb9c2f672c5 is 50, key is test_row_0/A:col10/1732617329060/Put/seqid=0 2024-11-26T10:35:30,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742270_1446 (size=12001) 2024-11-26T10:35:30,680 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/146a858013fa46f7bbe9407a76f0d094 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/146a858013fa46f7bbe9407a76f0d094 2024-11-26T10:35:30,680 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/863ba5a171fc4301b054e439e749f093 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/863ba5a171fc4301b054e439e749f093 2024-11-26T10:35:30,683 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 4d8f0b5b9c9359e0eccc71ea40315b28/B of 4d8f0b5b9c9359e0eccc71ea40315b28 into 146a858013fa46f7bbe9407a76f0d094(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-26T10:35:30,683 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 4d8f0b5b9c9359e0eccc71ea40315b28/A of 4d8f0b5b9c9359e0eccc71ea40315b28 into 863ba5a171fc4301b054e439e749f093(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:35:30,683 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4d8f0b5b9c9359e0eccc71ea40315b28: 2024-11-26T10:35:30,683 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4d8f0b5b9c9359e0eccc71ea40315b28: 2024-11-26T10:35:30,683 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28., storeName=4d8f0b5b9c9359e0eccc71ea40315b28/B, priority=12, startTime=1732617330264; duration=0sec 2024-11-26T10:35:30,683 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28., storeName=4d8f0b5b9c9359e0eccc71ea40315b28/A, priority=12, startTime=1732617330264; duration=0sec 2024-11-26T10:35:30,684 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:35:30,684 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4d8f0b5b9c9359e0eccc71ea40315b28:B 2024-11-26T10:35:30,684 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:35:30,684 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4d8f0b5b9c9359e0eccc71ea40315b28:A 2024-11-26T10:35:30,684 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-26T10:35:30,684 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-26T10:35:30,684 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): 4d8f0b5b9c9359e0eccc71ea40315b28/C is initiating minor compaction (all files) 2024-11-26T10:35:30,685 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4d8f0b5b9c9359e0eccc71ea40315b28/C in TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 
2024-11-26T10:35:30,685 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/b51a2a1baa224d3888e3993faefc3290, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/0c9d0deaed6b4d869bb93ab55a299b7d, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/b98fbb3930b748099e5c17cd3b77e1d9, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/e34137eabcf347dabfa923e980585b3b] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp, totalSize=47.0 K
2024-11-26T10:35:30,685 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting b51a2a1baa224d3888e3993faefc3290, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732617325092
2024-11-26T10:35:30,685 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 0c9d0deaed6b4d869bb93ab55a299b7d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1732617325748
2024-11-26T10:35:30,685 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting b98fbb3930b748099e5c17cd3b77e1d9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732617326875
2024-11-26T10:35:30,685 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting e34137eabcf347dabfa923e980585b3b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=103, earliestPutTs=1732617329020
2024-11-26T10:35:30,691 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4d8f0b5b9c9359e0eccc71ea40315b28#C#compaction#377 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-26T10:35:30,691 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/af427ebca65b463297d991eaa2ccfbeb is 50, key is test_row_0/C:col10/1732617329022/Put/seqid=0
2024-11-26T10:35:30,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742271_1447 (size=12241)
2024-11-26T10:35:30,792 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=128 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/5df7a51c904b409bb992bcb9c2f672c5
2024-11-26T10:35:30,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/dbaf68aa56e54a7c813c25b965a79a15 is 50, key is test_row_0/B:col10/1732617329060/Put/seqid=0
2024-11-26T10:35:30,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742272_1448 (size=12001)
2024-11-26T10:35:30,807 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=128 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/dbaf68aa56e54a7c813c25b965a79a15
2024-11-26T10:35:30,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/021d6239126c411ca61d53062aa65a76 is 50, key is test_row_0/C:col10/1732617329060/Put/seqid=0
2024-11-26T10:35:30,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742273_1449 (size=12001)
2024-11-26T10:35:31,099 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/af427ebca65b463297d991eaa2ccfbeb as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/af427ebca65b463297d991eaa2ccfbeb
2024-11-26T10:35:31,102 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 4d8f0b5b9c9359e0eccc71ea40315b28/C of 4d8f0b5b9c9359e0eccc71ea40315b28 into af427ebca65b463297d991eaa2ccfbeb(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-26T10:35:31,102 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4d8f0b5b9c9359e0eccc71ea40315b28: 2024-11-26T10:35:31,102 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28., storeName=4d8f0b5b9c9359e0eccc71ea40315b28/C, priority=12, startTime=1732617330264; duration=0sec 2024-11-26T10:35:31,102 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:35:31,102 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4d8f0b5b9c9359e0eccc71ea40315b28:C 2024-11-26T10:35:31,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 4d8f0b5b9c9359e0eccc71ea40315b28 2024-11-26T10:35:31,134 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. as already flushing 2024-11-26T10:35:31,163 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:31,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34798 deadline: 1732617391159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:31,188 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:31,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1732617391186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:31,189 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:31,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34880 deadline: 1732617391186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:31,193 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:31,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34810 deadline: 1732617391192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:31,195 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:31,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34828 deadline: 1732617391193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:31,216 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=128 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/021d6239126c411ca61d53062aa65a76 2024-11-26T10:35:31,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/5df7a51c904b409bb992bcb9c2f672c5 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/5df7a51c904b409bb992bcb9c2f672c5 2024-11-26T10:35:31,223 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/5df7a51c904b409bb992bcb9c2f672c5, entries=150, sequenceid=128, filesize=11.7 K 2024-11-26T10:35:31,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/dbaf68aa56e54a7c813c25b965a79a15 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/dbaf68aa56e54a7c813c25b965a79a15 2024-11-26T10:35:31,227 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/dbaf68aa56e54a7c813c25b965a79a15, entries=150, sequenceid=128, filesize=11.7 K 2024-11-26T10:35:31,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/021d6239126c411ca61d53062aa65a76 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/021d6239126c411ca61d53062aa65a76 2024-11-26T10:35:31,230 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/021d6239126c411ca61d53062aa65a76, entries=150, sequenceid=128, filesize=11.7 K 2024-11-26T10:35:31,231 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 4d8f0b5b9c9359e0eccc71ea40315b28 in 846ms, sequenceid=128, compaction requested=false 2024-11-26T10:35:31,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2538): Flush status journal for 4d8f0b5b9c9359e0eccc71ea40315b28: 2024-11-26T10:35:31,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 
2024-11-26T10:35:31,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=131
2024-11-26T10:35:31,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4106): Remote procedure done, pid=131
2024-11-26T10:35:31,232 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=131, resume processing ppid=130
2024-11-26T10:35:31,233 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, ppid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0650 sec
2024-11-26T10:35:31,233 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees in 2.0670 sec
2024-11-26T10:35:31,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 4d8f0b5b9c9359e0eccc71ea40315b28
2024-11-26T10:35:31,266 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4d8f0b5b9c9359e0eccc71ea40315b28 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB
2024-11-26T10:35:31,266 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4d8f0b5b9c9359e0eccc71ea40315b28, store=A
2024-11-26T10:35:31,266 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-26T10:35:31,266 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4d8f0b5b9c9359e0eccc71ea40315b28, store=B
2024-11-26T10:35:31,266 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-26T10:35:31,266 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4d8f0b5b9c9359e0eccc71ea40315b28, store=C
2024-11-26T10:35:31,266 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-26T10:35:31,269 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/04104f12534b4b68b870c9b3a7bde2cb is 50, key is test_row_0/A:col10/1732617331265/Put/seqid=0
2024-11-26T10:35:31,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130
2024-11-26T10:35:31,270 INFO [Thread-1913 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 130 completed
2024-11-26T10:35:31,271 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-11-26T10:35:31,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees
2024-11-26T10:35:31,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132
2024-11-26T10:35:31,272 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-26T10:35:31,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742274_1450 (size=14541)
2024-11-26T10:35:31,273 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-26T10:35:31,273 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-26T10:35:31,273 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=143 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/04104f12534b4b68b870c9b3a7bde2cb
2024-11-26T10:35:31,282 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/b6af5af8eb774bb0b7f07e9b41ac95c9 is 50, key is test_row_0/B:col10/1732617331265/Put/seqid=0
2024-11-26T10:35:31,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742275_1451 (size=12151)
2024-11-26T10:35:31,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132
2024-11-26T10:35:31,391 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:31,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34798 deadline: 1732617391386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:31,425 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:31,425 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-26T10:35:31,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:31,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. as already flushing 2024-11-26T10:35:31,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:31,426 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:35:31,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:31,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:31,496 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:31,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34798 deadline: 1732617391492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:31,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-26T10:35:31,578 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:31,578 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-26T10:35:31,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:31,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. as already flushing 2024-11-26T10:35:31,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 
2024-11-26T10:35:31,578 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:31,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:31,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:31,687 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=143 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/b6af5af8eb774bb0b7f07e9b41ac95c9 2024-11-26T10:35:31,692 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/2fa1c1968c154a9d8c2a606345caf5c0 is 50, key is test_row_0/C:col10/1732617331265/Put/seqid=0 2024-11-26T10:35:31,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742276_1452 (size=12151) 2024-11-26T10:35:31,701 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:31,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34798 deadline: 1732617391698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:31,730 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:31,730 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-26T10:35:31,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:31,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. as already flushing 2024-11-26T10:35:31,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:31,730 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:35:31,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:31,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:31,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-26T10:35:31,882 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:31,882 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-26T10:35:31,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:31,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. as already flushing 2024-11-26T10:35:31,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:31,883 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:31,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:31,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:32,005 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:32,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34798 deadline: 1732617392002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:32,034 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:32,034 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-26T10:35:32,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:32,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. as already flushing 2024-11-26T10:35:32,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:32,035 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:32,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:32,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:32,095 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=143 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/2fa1c1968c154a9d8c2a606345caf5c0 2024-11-26T10:35:32,098 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/04104f12534b4b68b870c9b3a7bde2cb as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/04104f12534b4b68b870c9b3a7bde2cb 2024-11-26T10:35:32,101 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/04104f12534b4b68b870c9b3a7bde2cb, entries=200, sequenceid=143, filesize=14.2 K 2024-11-26T10:35:32,101 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/b6af5af8eb774bb0b7f07e9b41ac95c9 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/b6af5af8eb774bb0b7f07e9b41ac95c9 2024-11-26T10:35:32,104 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/b6af5af8eb774bb0b7f07e9b41ac95c9, entries=150, 
sequenceid=143, filesize=11.9 K 2024-11-26T10:35:32,104 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/2fa1c1968c154a9d8c2a606345caf5c0 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/2fa1c1968c154a9d8c2a606345caf5c0 2024-11-26T10:35:32,107 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/2fa1c1968c154a9d8c2a606345caf5c0, entries=150, sequenceid=143, filesize=11.9 K 2024-11-26T10:35:32,107 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 4d8f0b5b9c9359e0eccc71ea40315b28 in 842ms, sequenceid=143, compaction requested=true 2024-11-26T10:35:32,107 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4d8f0b5b9c9359e0eccc71ea40315b28: 2024-11-26T10:35:32,107 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4d8f0b5b9c9359e0eccc71ea40315b28:A, priority=-2147483648, current under compaction store size is 1 2024-11-26T10:35:32,108 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:35:32,108 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:35:32,108 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4d8f0b5b9c9359e0eccc71ea40315b28:B, priority=-2147483648, current under compaction store size is 2 2024-11-26T10:35:32,108 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:35:32,108 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:35:32,108 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4d8f0b5b9c9359e0eccc71ea40315b28:C, priority=-2147483648, current under compaction store size is 3 2024-11-26T10:35:32,108 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:35:32,108 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38783 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:35:32,108 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:35:32,108 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] 
regionserver.HStore(1540): 4d8f0b5b9c9359e0eccc71ea40315b28/A is initiating minor compaction (all files) 2024-11-26T10:35:32,108 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): 4d8f0b5b9c9359e0eccc71ea40315b28/B is initiating minor compaction (all files) 2024-11-26T10:35:32,108 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4d8f0b5b9c9359e0eccc71ea40315b28/A in TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:32,108 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4d8f0b5b9c9359e0eccc71ea40315b28/B in TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:32,108 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/863ba5a171fc4301b054e439e749f093, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/5df7a51c904b409bb992bcb9c2f672c5, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/04104f12534b4b68b870c9b3a7bde2cb] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp, totalSize=37.9 K 2024-11-26T10:35:32,108 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/146a858013fa46f7bbe9407a76f0d094, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/dbaf68aa56e54a7c813c25b965a79a15, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/b6af5af8eb774bb0b7f07e9b41ac95c9] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp, totalSize=35.5 K 2024-11-26T10:35:32,109 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 146a858013fa46f7bbe9407a76f0d094, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=103, earliestPutTs=1732617329020 2024-11-26T10:35:32,109 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 863ba5a171fc4301b054e439e749f093, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=103, earliestPutTs=1732617329020 2024-11-26T10:35:32,109 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting dbaf68aa56e54a7c813c25b965a79a15, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1732617329057 2024-11-26T10:35:32,109 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5df7a51c904b409bb992bcb9c2f672c5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1732617329057 
2024-11-26T10:35:32,109 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting b6af5af8eb774bb0b7f07e9b41ac95c9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=143, earliestPutTs=1732617331141 2024-11-26T10:35:32,109 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 04104f12534b4b68b870c9b3a7bde2cb, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=143, earliestPutTs=1732617331141 2024-11-26T10:35:32,125 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4d8f0b5b9c9359e0eccc71ea40315b28#A#compaction#383 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:35:32,126 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/a86e25eb01884916b50afce6f148a47e is 50, key is test_row_0/A:col10/1732617331265/Put/seqid=0 2024-11-26T10:35:32,126 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4d8f0b5b9c9359e0eccc71ea40315b28#B#compaction#384 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:35:32,127 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/28787471414345f0b731e0ea5d91e865 is 50, key is test_row_0/B:col10/1732617331265/Put/seqid=0 2024-11-26T10:35:32,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742278_1454 (size=12493) 2024-11-26T10:35:32,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742277_1453 (size=12493) 2024-11-26T10:35:32,186 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:32,186 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-26T10:35:32,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 
2024-11-26T10:35:32,187 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2837): Flushing 4d8f0b5b9c9359e0eccc71ea40315b28 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-26T10:35:32,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4d8f0b5b9c9359e0eccc71ea40315b28, store=A 2024-11-26T10:35:32,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:32,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4d8f0b5b9c9359e0eccc71ea40315b28, store=B 2024-11-26T10:35:32,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:32,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4d8f0b5b9c9359e0eccc71ea40315b28, store=C 2024-11-26T10:35:32,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:32,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/a616f4c4619c47348c9627a29b9ab793 is 50, key is test_row_0/A:col10/1732617331373/Put/seqid=0 2024-11-26T10:35:32,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742279_1455 (size=12151) 2024-11-26T10:35:32,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-26T10:35:32,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 4d8f0b5b9c9359e0eccc71ea40315b28 2024-11-26T10:35:32,516 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. as already flushing 2024-11-26T10:35:32,541 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/a86e25eb01884916b50afce6f148a47e as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/a86e25eb01884916b50afce6f148a47e 2024-11-26T10:35:32,545 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4d8f0b5b9c9359e0eccc71ea40315b28/A of 4d8f0b5b9c9359e0eccc71ea40315b28 into a86e25eb01884916b50afce6f148a47e(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-26T10:35:32,545 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4d8f0b5b9c9359e0eccc71ea40315b28: 2024-11-26T10:35:32,545 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28., storeName=4d8f0b5b9c9359e0eccc71ea40315b28/A, priority=13, startTime=1732617332107; duration=0sec 2024-11-26T10:35:32,545 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:35:32,545 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4d8f0b5b9c9359e0eccc71ea40315b28:A 2024-11-26T10:35:32,545 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:35:32,546 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:35:32,546 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): 4d8f0b5b9c9359e0eccc71ea40315b28/C is initiating minor compaction (all files) 2024-11-26T10:35:32,547 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4d8f0b5b9c9359e0eccc71ea40315b28/C in TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:32,547 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/af427ebca65b463297d991eaa2ccfbeb, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/021d6239126c411ca61d53062aa65a76, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/2fa1c1968c154a9d8c2a606345caf5c0] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp, totalSize=35.5 K 2024-11-26T10:35:32,547 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting af427ebca65b463297d991eaa2ccfbeb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=103, earliestPutTs=1732617329020 2024-11-26T10:35:32,547 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 021d6239126c411ca61d53062aa65a76, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1732617329057 2024-11-26T10:35:32,547 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/28787471414345f0b731e0ea5d91e865 as 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/28787471414345f0b731e0ea5d91e865 2024-11-26T10:35:32,548 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2fa1c1968c154a9d8c2a606345caf5c0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=143, earliestPutTs=1732617331141 2024-11-26T10:35:32,551 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4d8f0b5b9c9359e0eccc71ea40315b28/B of 4d8f0b5b9c9359e0eccc71ea40315b28 into 28787471414345f0b731e0ea5d91e865(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:35:32,551 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4d8f0b5b9c9359e0eccc71ea40315b28: 2024-11-26T10:35:32,551 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28., storeName=4d8f0b5b9c9359e0eccc71ea40315b28/B, priority=13, startTime=1732617332108; duration=0sec 2024-11-26T10:35:32,551 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:35:32,551 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4d8f0b5b9c9359e0eccc71ea40315b28:B 2024-11-26T10:35:32,557 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4d8f0b5b9c9359e0eccc71ea40315b28#C#compaction#386 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:35:32,558 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/08323784fc5c428fab316a6ca87793a9 is 50, key is test_row_0/C:col10/1732617331265/Put/seqid=0 2024-11-26T10:35:32,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742280_1456 (size=12493) 2024-11-26T10:35:32,577 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:32,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34798 deadline: 1732617392572, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:32,594 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=167 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/a616f4c4619c47348c9627a29b9ab793 2024-11-26T10:35:32,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/c5c2a0f8b5d84a6c8652b5d9e0337bd7 is 50, key is test_row_0/B:col10/1732617331373/Put/seqid=0 2024-11-26T10:35:32,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742281_1457 (size=12151) 2024-11-26T10:35:32,681 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:32,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34798 deadline: 1732617392678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:32,885 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:32,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34798 deadline: 1732617392882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:32,968 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/08323784fc5c428fab316a6ca87793a9 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/08323784fc5c428fab316a6ca87793a9 2024-11-26T10:35:32,972 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4d8f0b5b9c9359e0eccc71ea40315b28/C of 4d8f0b5b9c9359e0eccc71ea40315b28 into 08323784fc5c428fab316a6ca87793a9(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
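The RegionTooBusyException records and the "tries=6, retries=16" retry traces around this point come from writer threads repeatedly putting rows into the blocked TestAcidGuarantees region while its memstore sits over the 512.0 K limit. A minimal client-side sketch of that kind of put, assuming an HBase 2.x client on the classpath, is shown below; the table name, row key, column family, and qualifier are taken from the log records above, while the class name and the pause value are illustrative assumptions, not part of this log.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // Client retry knobs; the "retries=16" figure matches the retry traces in this log,
    // the 100 ms base pause is an assumed (default-like) value.
    conf.setInt("hbase.client.retries.number", 16);
    conf.setLong("hbase.client.pause", 100L);

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row, family and qualifier as they appear in the flush/HFile messages above.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // The client retries internally with backoff; RegionTooBusyException only surfaces
      // to the caller once the configured retries are exhausted.
      table.put(put);
    }
  }
}

Under this reading, the WARN/DEBUG pairs that follow are simply the server-side rejection (checkResources in HRegion) and the corresponding CallRunner accounting for each retried Mutate call.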
2024-11-26T10:35:32,972 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4d8f0b5b9c9359e0eccc71ea40315b28: 2024-11-26T10:35:32,972 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28., storeName=4d8f0b5b9c9359e0eccc71ea40315b28/C, priority=13, startTime=1732617332108; duration=0sec 2024-11-26T10:35:32,972 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:35:32,972 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4d8f0b5b9c9359e0eccc71ea40315b28:C 2024-11-26T10:35:33,003 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=167 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/c5c2a0f8b5d84a6c8652b5d9e0337bd7 2024-11-26T10:35:33,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/2cbf02fd7c0b4f8ab2cefc735661a457 is 50, key is test_row_0/C:col10/1732617331373/Put/seqid=0 2024-11-26T10:35:33,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742282_1458 (size=12151) 2024-11-26T10:35:33,188 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:33,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34798 deadline: 1732617393186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:33,197 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:33,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34810 deadline: 1732617393194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:33,197 DEBUG [Thread-1905 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4140 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28., hostname=ccf62758a0a5,45419,1732617185877, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-26T10:35:33,205 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:33,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1732617393201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:33,205 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:33,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34880 deadline: 1732617393202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:33,205 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:33,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34828 deadline: 1732617393203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:33,205 DEBUG [Thread-1909 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4146 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28., hostname=ccf62758a0a5,45419,1732617185877, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-26T10:35:33,205 DEBUG [Thread-1903 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4147 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28., hostname=ccf62758a0a5,45419,1732617185877, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at 
org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-26T10:35:33,206 DEBUG [Thread-1911 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4145 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28., hostname=ccf62758a0a5,45419,1732617185877, seqNum=2, see https://s.apache.org/timeout, 
exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at 
org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-26T10:35:33,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-26T10:35:33,411 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=167 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/2cbf02fd7c0b4f8ab2cefc735661a457 2024-11-26T10:35:33,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/a616f4c4619c47348c9627a29b9ab793 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/a616f4c4619c47348c9627a29b9ab793 2024-11-26T10:35:33,417 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/a616f4c4619c47348c9627a29b9ab793, entries=150, sequenceid=167, filesize=11.9 K 2024-11-26T10:35:33,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/c5c2a0f8b5d84a6c8652b5d9e0337bd7 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/c5c2a0f8b5d84a6c8652b5d9e0337bd7 2024-11-26T10:35:33,420 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/c5c2a0f8b5d84a6c8652b5d9e0337bd7, entries=150, sequenceid=167, filesize=11.9 K 2024-11-26T10:35:33,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/2cbf02fd7c0b4f8ab2cefc735661a457 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/2cbf02fd7c0b4f8ab2cefc735661a457 2024-11-26T10:35:33,423 INFO 
[RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/2cbf02fd7c0b4f8ab2cefc735661a457, entries=150, sequenceid=167, filesize=11.9 K 2024-11-26T10:35:33,424 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 4d8f0b5b9c9359e0eccc71ea40315b28 in 1237ms, sequenceid=167, compaction requested=false 2024-11-26T10:35:33,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2538): Flush status journal for 4d8f0b5b9c9359e0eccc71ea40315b28: 2024-11-26T10:35:33,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:33,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=133 2024-11-26T10:35:33,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4106): Remote procedure done, pid=133 2024-11-26T10:35:33,426 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=133, resume processing ppid=132 2024-11-26T10:35:33,426 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, ppid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1520 sec 2024-11-26T10:35:33,426 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees in 2.1550 sec 2024-11-26T10:35:33,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 4d8f0b5b9c9359e0eccc71ea40315b28 2024-11-26T10:35:33,698 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4d8f0b5b9c9359e0eccc71ea40315b28 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-26T10:35:33,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4d8f0b5b9c9359e0eccc71ea40315b28, store=A 2024-11-26T10:35:33,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:33,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4d8f0b5b9c9359e0eccc71ea40315b28, store=B 2024-11-26T10:35:33,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:33,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4d8f0b5b9c9359e0eccc71ea40315b28, store=C 2024-11-26T10:35:33,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:33,701 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/2438690c48dc48e0a09fe439a1ffba32 is 50, key is test_row_0/A:col10/1732617332571/Put/seqid=0 2024-11-26T10:35:33,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742283_1459 (size=14541) 2024-11-26T10:35:33,785 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:33,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34798 deadline: 1732617393780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:33,889 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:33,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34798 deadline: 1732617393886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:34,092 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:34,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34798 deadline: 1732617394090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:34,104 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=183 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/2438690c48dc48e0a09fe439a1ffba32 2024-11-26T10:35:34,109 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/02d21a33fbcd400eb24cfc21127b15c3 is 50, key is test_row_0/B:col10/1732617332571/Put/seqid=0 2024-11-26T10:35:34,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742284_1460 (size=12151) 2024-11-26T10:35:34,344 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-26T10:35:34,397 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:34,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34798 deadline: 1732617394394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:34,513 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=183 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/02d21a33fbcd400eb24cfc21127b15c3 2024-11-26T10:35:34,524 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/972c9ca636f241fe96d5aa66f1ddec35 is 50, key is test_row_0/C:col10/1732617332571/Put/seqid=0 2024-11-26T10:35:34,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742285_1461 (size=12151) 2024-11-26T10:35:34,905 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:34,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34798 deadline: 1732617394902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:34,936 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=183 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/972c9ca636f241fe96d5aa66f1ddec35 2024-11-26T10:35:34,939 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/2438690c48dc48e0a09fe439a1ffba32 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/2438690c48dc48e0a09fe439a1ffba32 2024-11-26T10:35:34,941 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/2438690c48dc48e0a09fe439a1ffba32, entries=200, sequenceid=183, filesize=14.2 K 2024-11-26T10:35:34,942 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/02d21a33fbcd400eb24cfc21127b15c3 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/02d21a33fbcd400eb24cfc21127b15c3 2024-11-26T10:35:34,944 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/02d21a33fbcd400eb24cfc21127b15c3, entries=150, sequenceid=183, filesize=11.9 K 2024-11-26T10:35:34,945 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/972c9ca636f241fe96d5aa66f1ddec35 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/972c9ca636f241fe96d5aa66f1ddec35 2024-11-26T10:35:34,947 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/972c9ca636f241fe96d5aa66f1ddec35, entries=150, sequenceid=183, filesize=11.9 K 2024-11-26T10:35:34,948 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 4d8f0b5b9c9359e0eccc71ea40315b28 in 1250ms, sequenceid=183, compaction requested=true 2024-11-26T10:35:34,948 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4d8f0b5b9c9359e0eccc71ea40315b28: 2024-11-26T10:35:34,948 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4d8f0b5b9c9359e0eccc71ea40315b28:A, priority=-2147483648, current under compaction store size is 1 2024-11-26T10:35:34,948 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:35:34,948 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:35:34,948 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4d8f0b5b9c9359e0eccc71ea40315b28:B, priority=-2147483648, current under compaction store size is 2 2024-11-26T10:35:34,948 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:35:34,948 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:35:34,948 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4d8f0b5b9c9359e0eccc71ea40315b28:C, priority=-2147483648, current under compaction store size is 3 2024-11-26T10:35:34,948 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:35:34,949 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:35:34,949 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39185 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:35:34,949 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): 4d8f0b5b9c9359e0eccc71ea40315b28/A is initiating minor compaction (all files) 2024-11-26T10:35:34,949 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): 4d8f0b5b9c9359e0eccc71ea40315b28/B is initiating minor compaction (all files) 2024-11-26T10:35:34,949 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4d8f0b5b9c9359e0eccc71ea40315b28/A in TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 
2024-11-26T10:35:34,949 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4d8f0b5b9c9359e0eccc71ea40315b28/B in TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:34,949 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/28787471414345f0b731e0ea5d91e865, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/c5c2a0f8b5d84a6c8652b5d9e0337bd7, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/02d21a33fbcd400eb24cfc21127b15c3] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp, totalSize=35.9 K 2024-11-26T10:35:34,949 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/a86e25eb01884916b50afce6f148a47e, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/a616f4c4619c47348c9627a29b9ab793, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/2438690c48dc48e0a09fe439a1ffba32] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp, totalSize=38.3 K 2024-11-26T10:35:34,949 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting a86e25eb01884916b50afce6f148a47e, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=143, earliestPutTs=1732617331141 2024-11-26T10:35:34,949 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 28787471414345f0b731e0ea5d91e865, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=143, earliestPutTs=1732617331141 2024-11-26T10:35:34,950 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting c5c2a0f8b5d84a6c8652b5d9e0337bd7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1732617331373 2024-11-26T10:35:34,950 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting a616f4c4619c47348c9627a29b9ab793, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1732617331373 2024-11-26T10:35:34,950 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 02d21a33fbcd400eb24cfc21127b15c3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=183, earliestPutTs=1732617332528 2024-11-26T10:35:34,950 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2438690c48dc48e0a09fe439a1ffba32, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=183, earliestPutTs=1732617332528 
2024-11-26T10:35:34,954 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4d8f0b5b9c9359e0eccc71ea40315b28#A#compaction#392 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:35:34,955 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/c4c452440c814e4986859280015171a3 is 50, key is test_row_0/A:col10/1732617332571/Put/seqid=0 2024-11-26T10:35:34,957 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4d8f0b5b9c9359e0eccc71ea40315b28#B#compaction#393 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:35:34,957 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/d4b85a5c7d2a4d069ae88b865a235f30 is 50, key is test_row_0/B:col10/1732617332571/Put/seqid=0 2024-11-26T10:35:34,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742286_1462 (size=12595) 2024-11-26T10:35:34,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742287_1463 (size=12595) 2024-11-26T10:35:35,368 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/c4c452440c814e4986859280015171a3 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/c4c452440c814e4986859280015171a3 2024-11-26T10:35:35,374 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/d4b85a5c7d2a4d069ae88b865a235f30 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/d4b85a5c7d2a4d069ae88b865a235f30 2024-11-26T10:35:35,374 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4d8f0b5b9c9359e0eccc71ea40315b28/A of 4d8f0b5b9c9359e0eccc71ea40315b28 into c4c452440c814e4986859280015171a3(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-26T10:35:35,374 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4d8f0b5b9c9359e0eccc71ea40315b28: 2024-11-26T10:35:35,374 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28., storeName=4d8f0b5b9c9359e0eccc71ea40315b28/A, priority=13, startTime=1732617334948; duration=0sec 2024-11-26T10:35:35,374 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:35:35,374 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4d8f0b5b9c9359e0eccc71ea40315b28:A 2024-11-26T10:35:35,375 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:35:35,375 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:35:35,375 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): 4d8f0b5b9c9359e0eccc71ea40315b28/C is initiating minor compaction (all files) 2024-11-26T10:35:35,375 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4d8f0b5b9c9359e0eccc71ea40315b28/C in TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:35,375 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/08323784fc5c428fab316a6ca87793a9, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/2cbf02fd7c0b4f8ab2cefc735661a457, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/972c9ca636f241fe96d5aa66f1ddec35] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp, totalSize=35.9 K 2024-11-26T10:35:35,376 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 08323784fc5c428fab316a6ca87793a9, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=143, earliestPutTs=1732617331141 2024-11-26T10:35:35,376 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2cbf02fd7c0b4f8ab2cefc735661a457, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1732617331373 2024-11-26T10:35:35,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-26T10:35:35,377 INFO [Thread-1913 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 132 completed 2024-11-26T10:35:35,377 DEBUG 
[RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 972c9ca636f241fe96d5aa66f1ddec35, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=183, earliestPutTs=1732617332528 2024-11-26T10:35:35,378 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-26T10:35:35,378 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4d8f0b5b9c9359e0eccc71ea40315b28/B of 4d8f0b5b9c9359e0eccc71ea40315b28 into d4b85a5c7d2a4d069ae88b865a235f30(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:35:35,378 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4d8f0b5b9c9359e0eccc71ea40315b28: 2024-11-26T10:35:35,378 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28., storeName=4d8f0b5b9c9359e0eccc71ea40315b28/B, priority=13, startTime=1732617334948; duration=0sec 2024-11-26T10:35:35,378 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:35:35,378 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4d8f0b5b9c9359e0eccc71ea40315b28:B 2024-11-26T10:35:35,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees 2024-11-26T10:35:35,379 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-26T10:35:35,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-26T10:35:35,379 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-26T10:35:35,379 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=135, ppid=134, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-26T10:35:35,384 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4d8f0b5b9c9359e0eccc71ea40315b28#C#compaction#394 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:35:35,385 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/6e4fb45ae6f34987af15c413d2ae9cb9 is 50, key is test_row_0/C:col10/1732617332571/Put/seqid=0 2024-11-26T10:35:35,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742288_1464 (size=12595) 2024-11-26T10:35:35,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-26T10:35:35,531 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:35,531 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-26T10:35:35,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:35,531 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2837): Flushing 4d8f0b5b9c9359e0eccc71ea40315b28 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-26T10:35:35,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4d8f0b5b9c9359e0eccc71ea40315b28, store=A 2024-11-26T10:35:35,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:35,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4d8f0b5b9c9359e0eccc71ea40315b28, store=B 2024-11-26T10:35:35,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:35,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4d8f0b5b9c9359e0eccc71ea40315b28, store=C 2024-11-26T10:35:35,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:35,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/6007163654994c56b504d7579e388727 is 50, key is test_row_0/A:col10/1732617333774/Put/seqid=0 2024-11-26T10:35:35,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742289_1465 
(size=12151) 2024-11-26T10:35:35,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-26T10:35:35,791 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/6e4fb45ae6f34987af15c413d2ae9cb9 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/6e4fb45ae6f34987af15c413d2ae9cb9 2024-11-26T10:35:35,794 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4d8f0b5b9c9359e0eccc71ea40315b28/C of 4d8f0b5b9c9359e0eccc71ea40315b28 into 6e4fb45ae6f34987af15c413d2ae9cb9(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:35:35,794 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4d8f0b5b9c9359e0eccc71ea40315b28: 2024-11-26T10:35:35,795 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28., storeName=4d8f0b5b9c9359e0eccc71ea40315b28/C, priority=13, startTime=1732617334948; duration=0sec 2024-11-26T10:35:35,795 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:35:35,795 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4d8f0b5b9c9359e0eccc71ea40315b28:C 2024-11-26T10:35:35,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 4d8f0b5b9c9359e0eccc71ea40315b28 2024-11-26T10:35:35,914 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. as already flushing 2024-11-26T10:35:35,938 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=208 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/6007163654994c56b504d7579e388727 2024-11-26T10:35:35,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/db16460f44ec4445b4c4fbdff8b38e3c is 50, key is test_row_0/B:col10/1732617333774/Put/seqid=0 2024-11-26T10:35:35,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742290_1466 (size=12151) 2024-11-26T10:35:35,963 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:35,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34798 deadline: 1732617395957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:35,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-26T10:35:36,064 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:36,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34798 deadline: 1732617396063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:36,267 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:36,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34798 deadline: 1732617396265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:36,346 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=208 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/db16460f44ec4445b4c4fbdff8b38e3c 2024-11-26T10:35:36,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/036b5bf2520145b68dfb0f227b6a10b6 is 50, key is test_row_0/C:col10/1732617333774/Put/seqid=0 2024-11-26T10:35:36,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742291_1467 (size=12151) 2024-11-26T10:35:36,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-26T10:35:36,573 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:36,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34798 deadline: 1732617396570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:36,754 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=208 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/036b5bf2520145b68dfb0f227b6a10b6 2024-11-26T10:35:36,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/6007163654994c56b504d7579e388727 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/6007163654994c56b504d7579e388727 2024-11-26T10:35:36,760 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/6007163654994c56b504d7579e388727, entries=150, sequenceid=208, filesize=11.9 K 2024-11-26T10:35:36,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/db16460f44ec4445b4c4fbdff8b38e3c as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/db16460f44ec4445b4c4fbdff8b38e3c 2024-11-26T10:35:36,762 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/db16460f44ec4445b4c4fbdff8b38e3c, entries=150, sequenceid=208, filesize=11.9 K 2024-11-26T10:35:36,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/036b5bf2520145b68dfb0f227b6a10b6 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/036b5bf2520145b68dfb0f227b6a10b6 2024-11-26T10:35:36,766 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/036b5bf2520145b68dfb0f227b6a10b6, entries=150, sequenceid=208, filesize=11.9 K 2024-11-26T10:35:36,767 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 4d8f0b5b9c9359e0eccc71ea40315b28 in 1235ms, sequenceid=208, compaction requested=false 2024-11-26T10:35:36,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2538): Flush status journal for 4d8f0b5b9c9359e0eccc71ea40315b28: 2024-11-26T10:35:36,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:36,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=135 2024-11-26T10:35:36,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4106): Remote procedure done, pid=135 2024-11-26T10:35:36,768 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=135, resume processing ppid=134 2024-11-26T10:35:36,768 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, ppid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3880 sec 2024-11-26T10:35:36,769 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees in 1.3900 sec 2024-11-26T10:35:37,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 4d8f0b5b9c9359e0eccc71ea40315b28 2024-11-26T10:35:37,135 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4d8f0b5b9c9359e0eccc71ea40315b28 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-26T10:35:37,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4d8f0b5b9c9359e0eccc71ea40315b28, store=A 2024-11-26T10:35:37,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:37,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4d8f0b5b9c9359e0eccc71ea40315b28, store=B 2024-11-26T10:35:37,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:37,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
4d8f0b5b9c9359e0eccc71ea40315b28, store=C 2024-11-26T10:35:37,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:37,138 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/c98eb7f7eb8f4bdf957d147aeda74162 is 50, key is test_row_0/A:col10/1732617337079/Put/seqid=0 2024-11-26T10:35:37,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742292_1468 (size=14541) 2024-11-26T10:35:37,235 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:37,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34810 deadline: 1732617397227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:37,235 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:37,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34880 deadline: 1732617397228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:37,236 DEBUG [Thread-1903 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8177 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28., hostname=ccf62758a0a5,45419,1732617185877, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-26T10:35:37,240 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:37,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34798 deadline: 1732617397234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:37,241 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:37,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34828 deadline: 1732617397238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:37,241 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:37,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1732617397239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:37,241 DEBUG [Thread-1911 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8180 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28., hostname=ccf62758a0a5,45419,1732617185877, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-26T10:35:37,242 DEBUG [Thread-1909 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8183 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28., hostname=ccf62758a0a5,45419,1732617185877, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at 
org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-26T10:35:37,340 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:37,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34810 deadline: 1732617397336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:37,346 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:37,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34798 deadline: 1732617397342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:37,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-26T10:35:37,482 INFO [Thread-1913 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 134 completed 2024-11-26T10:35:37,483 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-26T10:35:37,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees 2024-11-26T10:35:37,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-26T10:35:37,484 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-26T10:35:37,485 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-26T10:35:37,485 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-26T10:35:37,545 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=223 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/c98eb7f7eb8f4bdf957d147aeda74162 2024-11-26T10:35:37,546 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:37,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34810 deadline: 1732617397541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:37,551 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:37,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34798 deadline: 1732617397547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:37,554 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/852fa31b4f574ea0ba304f11b5774d5a is 50, key is test_row_0/B:col10/1732617337079/Put/seqid=0 2024-11-26T10:35:37,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742293_1469 (size=12151) 2024-11-26T10:35:37,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-26T10:35:37,638 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:37,638 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-26T10:35:37,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:37,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. as already flushing 2024-11-26T10:35:37,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:37,639 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:37,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:37,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:37,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-26T10:35:37,792 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:37,792 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-26T10:35:37,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:37,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. as already flushing 2024-11-26T10:35:37,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:37,792 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:37,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:37,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:37,853 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:37,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34810 deadline: 1732617397849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:37,853 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:37,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34798 deadline: 1732617397853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:37,944 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:37,944 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-26T10:35:37,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 
2024-11-26T10:35:37,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. as already flushing 2024-11-26T10:35:37,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:37,945 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:37,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:35:37,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:35:37,958 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=223 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/852fa31b4f574ea0ba304f11b5774d5a 2024-11-26T10:35:37,962 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/a83fdbd9baef459f94c5d941c5223fe8 is 50, key is test_row_0/C:col10/1732617337079/Put/seqid=0 2024-11-26T10:35:37,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742294_1470 (size=12151) 2024-11-26T10:35:38,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-26T10:35:38,096 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:38,097 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-26T10:35:38,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:38,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. as already flushing 2024-11-26T10:35:38,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:38,097 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:35:38,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:38,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:38,249 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:38,249 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-26T10:35:38,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:38,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. as already flushing 2024-11-26T10:35:38,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:38,249 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:38,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:38,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:38,359 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:38,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34798 deadline: 1732617398355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:38,359 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:38,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34810 deadline: 1732617398357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:38,365 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=223 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/a83fdbd9baef459f94c5d941c5223fe8 2024-11-26T10:35:38,368 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/c98eb7f7eb8f4bdf957d147aeda74162 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/c98eb7f7eb8f4bdf957d147aeda74162 2024-11-26T10:35:38,370 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/c98eb7f7eb8f4bdf957d147aeda74162, entries=200, sequenceid=223, filesize=14.2 K 2024-11-26T10:35:38,371 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/852fa31b4f574ea0ba304f11b5774d5a as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/852fa31b4f574ea0ba304f11b5774d5a 2024-11-26T10:35:38,373 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/852fa31b4f574ea0ba304f11b5774d5a, entries=150, sequenceid=223, filesize=11.9 K 2024-11-26T10:35:38,374 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/a83fdbd9baef459f94c5d941c5223fe8 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/a83fdbd9baef459f94c5d941c5223fe8 2024-11-26T10:35:38,377 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/a83fdbd9baef459f94c5d941c5223fe8, entries=150, sequenceid=223, filesize=11.9 K 2024-11-26T10:35:38,377 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 4d8f0b5b9c9359e0eccc71ea40315b28 in 1242ms, sequenceid=223, compaction requested=true 2024-11-26T10:35:38,378 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4d8f0b5b9c9359e0eccc71ea40315b28: 2024-11-26T10:35:38,378 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4d8f0b5b9c9359e0eccc71ea40315b28:A, priority=-2147483648, current under compaction store size is 1 2024-11-26T10:35:38,378 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:35:38,378 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4d8f0b5b9c9359e0eccc71ea40315b28:B, priority=-2147483648, current under compaction store size is 2 2024-11-26T10:35:38,378 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:35:38,378 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:35:38,378 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4d8f0b5b9c9359e0eccc71ea40315b28:C, priority=-2147483648, current under compaction store size is 3 2024-11-26T10:35:38,378 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:35:38,378 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:35:38,378 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:35:38,378 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39287 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:35:38,378 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): 4d8f0b5b9c9359e0eccc71ea40315b28/A is initiating minor compaction (all files) 2024-11-26T10:35:38,378 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): 4d8f0b5b9c9359e0eccc71ea40315b28/B is initiating minor compaction (all files) 2024-11-26T10:35:38,379 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4d8f0b5b9c9359e0eccc71ea40315b28/A in TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 
2024-11-26T10:35:38,379 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4d8f0b5b9c9359e0eccc71ea40315b28/B in TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:38,379 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/c4c452440c814e4986859280015171a3, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/6007163654994c56b504d7579e388727, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/c98eb7f7eb8f4bdf957d147aeda74162] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp, totalSize=38.4 K 2024-11-26T10:35:38,379 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/d4b85a5c7d2a4d069ae88b865a235f30, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/db16460f44ec4445b4c4fbdff8b38e3c, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/852fa31b4f574ea0ba304f11b5774d5a] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp, totalSize=36.0 K 2024-11-26T10:35:38,379 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting c4c452440c814e4986859280015171a3, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=183, earliestPutTs=1732617332528 2024-11-26T10:35:38,379 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting d4b85a5c7d2a4d069ae88b865a235f30, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=183, earliestPutTs=1732617332528 2024-11-26T10:35:38,379 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6007163654994c56b504d7579e388727, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1732617333753 2024-11-26T10:35:38,379 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting db16460f44ec4445b4c4fbdff8b38e3c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1732617333753 2024-11-26T10:35:38,379 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting c98eb7f7eb8f4bdf957d147aeda74162, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=223, earliestPutTs=1732617335946 2024-11-26T10:35:38,379 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 852fa31b4f574ea0ba304f11b5774d5a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=223, earliestPutTs=1732617335950 
2024-11-26T10:35:38,384 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4d8f0b5b9c9359e0eccc71ea40315b28#A#compaction#401 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:35:38,385 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4d8f0b5b9c9359e0eccc71ea40315b28#B#compaction#402 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:35:38,385 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/4ee041b569de401594935439522a85fc is 50, key is test_row_0/A:col10/1732617337079/Put/seqid=0 2024-11-26T10:35:38,385 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/eb26aca1b2dd466194d9fd3a16294ed1 is 50, key is test_row_0/B:col10/1732617337079/Put/seqid=0 2024-11-26T10:35:38,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742295_1471 (size=12697) 2024-11-26T10:35:38,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742296_1472 (size=12697) 2024-11-26T10:35:38,401 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:38,401 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-26T10:35:38,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 
2024-11-26T10:35:38,402 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2837): Flushing 4d8f0b5b9c9359e0eccc71ea40315b28 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-26T10:35:38,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4d8f0b5b9c9359e0eccc71ea40315b28, store=A 2024-11-26T10:35:38,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:38,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4d8f0b5b9c9359e0eccc71ea40315b28, store=B 2024-11-26T10:35:38,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:38,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4d8f0b5b9c9359e0eccc71ea40315b28, store=C 2024-11-26T10:35:38,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:38,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/bab5891457f34db191d0e1b3f19922d6 is 50, key is test_row_0/A:col10/1732617337226/Put/seqid=0 2024-11-26T10:35:38,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742297_1473 (size=12151) 2024-11-26T10:35:38,423 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=245 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/bab5891457f34db191d0e1b3f19922d6 2024-11-26T10:35:38,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/0705aa953bad45508a3fca0394237712 is 50, key is test_row_0/B:col10/1732617337226/Put/seqid=0 2024-11-26T10:35:38,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742298_1474 (size=12151) 2024-11-26T10:35:38,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-26T10:35:38,800 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/4ee041b569de401594935439522a85fc as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/4ee041b569de401594935439522a85fc 2024-11-26T10:35:38,800 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/eb26aca1b2dd466194d9fd3a16294ed1 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/eb26aca1b2dd466194d9fd3a16294ed1 2024-11-26T10:35:38,804 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4d8f0b5b9c9359e0eccc71ea40315b28/B of 4d8f0b5b9c9359e0eccc71ea40315b28 into eb26aca1b2dd466194d9fd3a16294ed1(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:35:38,804 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4d8f0b5b9c9359e0eccc71ea40315b28/A of 4d8f0b5b9c9359e0eccc71ea40315b28 into 4ee041b569de401594935439522a85fc(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:35:38,804 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4d8f0b5b9c9359e0eccc71ea40315b28: 2024-11-26T10:35:38,804 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4d8f0b5b9c9359e0eccc71ea40315b28: 2024-11-26T10:35:38,804 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28., storeName=4d8f0b5b9c9359e0eccc71ea40315b28/A, priority=13, startTime=1732617338378; duration=0sec 2024-11-26T10:35:38,804 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28., storeName=4d8f0b5b9c9359e0eccc71ea40315b28/B, priority=13, startTime=1732617338378; duration=0sec 2024-11-26T10:35:38,804 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:35:38,804 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4d8f0b5b9c9359e0eccc71ea40315b28:B 2024-11-26T10:35:38,804 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:35:38,804 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4d8f0b5b9c9359e0eccc71ea40315b28:A 2024-11-26T10:35:38,804 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 
compacting, 3 eligible, 16 blocking 2024-11-26T10:35:38,804 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:35:38,804 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): 4d8f0b5b9c9359e0eccc71ea40315b28/C is initiating minor compaction (all files) 2024-11-26T10:35:38,804 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4d8f0b5b9c9359e0eccc71ea40315b28/C in TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:38,805 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/6e4fb45ae6f34987af15c413d2ae9cb9, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/036b5bf2520145b68dfb0f227b6a10b6, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/a83fdbd9baef459f94c5d941c5223fe8] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp, totalSize=36.0 K 2024-11-26T10:35:38,805 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 6e4fb45ae6f34987af15c413d2ae9cb9, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=183, earliestPutTs=1732617332528 2024-11-26T10:35:38,805 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 036b5bf2520145b68dfb0f227b6a10b6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1732617333753 2024-11-26T10:35:38,805 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting a83fdbd9baef459f94c5d941c5223fe8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=223, earliestPutTs=1732617335950 2024-11-26T10:35:38,810 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4d8f0b5b9c9359e0eccc71ea40315b28#C#compaction#405 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:35:38,810 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/b956a72ef3aa4c0580d1568e97bca483 is 50, key is test_row_0/C:col10/1732617337079/Put/seqid=0 2024-11-26T10:35:38,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742299_1475 (size=12697) 2024-11-26T10:35:38,840 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=245 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/0705aa953bad45508a3fca0394237712 2024-11-26T10:35:38,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/098342455a254f3ca70912382b7c1327 is 50, key is test_row_0/C:col10/1732617337226/Put/seqid=0 2024-11-26T10:35:38,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742300_1476 (size=12151) 2024-11-26T10:35:39,216 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/b956a72ef3aa4c0580d1568e97bca483 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/b956a72ef3aa4c0580d1568e97bca483 2024-11-26T10:35:39,219 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4d8f0b5b9c9359e0eccc71ea40315b28/C of 4d8f0b5b9c9359e0eccc71ea40315b28 into b956a72ef3aa4c0580d1568e97bca483(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-26T10:35:39,219 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4d8f0b5b9c9359e0eccc71ea40315b28: 2024-11-26T10:35:39,219 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28., storeName=4d8f0b5b9c9359e0eccc71ea40315b28/C, priority=13, startTime=1732617338378; duration=0sec 2024-11-26T10:35:39,219 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:35:39,219 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4d8f0b5b9c9359e0eccc71ea40315b28:C 2024-11-26T10:35:39,248 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=245 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/098342455a254f3ca70912382b7c1327 2024-11-26T10:35:39,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/bab5891457f34db191d0e1b3f19922d6 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/bab5891457f34db191d0e1b3f19922d6 2024-11-26T10:35:39,254 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/bab5891457f34db191d0e1b3f19922d6, entries=150, sequenceid=245, filesize=11.9 K 2024-11-26T10:35:39,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/0705aa953bad45508a3fca0394237712 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/0705aa953bad45508a3fca0394237712 2024-11-26T10:35:39,257 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/0705aa953bad45508a3fca0394237712, entries=150, sequenceid=245, filesize=11.9 K 2024-11-26T10:35:39,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/098342455a254f3ca70912382b7c1327 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/098342455a254f3ca70912382b7c1327 2024-11-26T10:35:39,260 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/098342455a254f3ca70912382b7c1327, entries=150, sequenceid=245, filesize=11.9 K 2024-11-26T10:35:39,261 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=0 B/0 for 4d8f0b5b9c9359e0eccc71ea40315b28 in 860ms, sequenceid=245, compaction requested=false 2024-11-26T10:35:39,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2538): Flush status journal for 4d8f0b5b9c9359e0eccc71ea40315b28: 2024-11-26T10:35:39,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:39,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=137 2024-11-26T10:35:39,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4106): Remote procedure done, pid=137 2024-11-26T10:35:39,263 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=137, resume processing ppid=136 2024-11-26T10:35:39,263 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7770 sec 2024-11-26T10:35:39,263 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees in 1.7800 sec 2024-11-26T10:35:39,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 4d8f0b5b9c9359e0eccc71ea40315b28 2024-11-26T10:35:39,375 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4d8f0b5b9c9359e0eccc71ea40315b28 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-26T10:35:39,375 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4d8f0b5b9c9359e0eccc71ea40315b28, store=A 2024-11-26T10:35:39,375 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:39,375 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4d8f0b5b9c9359e0eccc71ea40315b28, store=B 2024-11-26T10:35:39,375 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:39,375 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
4d8f0b5b9c9359e0eccc71ea40315b28, store=C 2024-11-26T10:35:39,375 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:39,378 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/bb0d0a6112b14e45b63ad6ad1ac8cddf is 50, key is test_row_0/A:col10/1732617339370/Put/seqid=0 2024-11-26T10:35:39,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742301_1477 (size=17081) 2024-11-26T10:35:39,442 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:39,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34810 deadline: 1732617399437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:39,443 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:39,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34798 deadline: 1732617399438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:39,548 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:39,548 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:39,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34810 deadline: 1732617399543, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:39,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34798 deadline: 1732617399543, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:39,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-26T10:35:39,588 INFO [Thread-1913 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 136 completed 2024-11-26T10:35:39,589 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-26T10:35:39,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees 2024-11-26T10:35:39,590 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-26T10:35:39,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-26T10:35:39,590 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-26T10:35:39,590 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=138, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-26T10:35:39,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-26T10:35:39,742 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:39,742 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-26T10:35:39,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:39,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. as already flushing 2024-11-26T10:35:39,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:39,742 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:39,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
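[Editor's illustration] The pid=139 failures above ("Unable to complete flush ... as already flushing") occur because the region is still in the middle of the memstore flush started at 10:35:39,375, so the master keeps re-dispatching the FlushRegionProcedure until it can run. A minimal hedged sketch of how a client drives this same kind of table flush through the public Admin API is below; connection setup and the table name binding are illustrative assumptions, not taken from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    // Assumed setup: configuration pointing at the test cluster.
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Admin.flush submits a flush of the table on the master and waits for the
      // operation to complete, which matches the FlushTableProcedure / TableFuture
      // "Operation: FLUSH ... completed" entries seen elsewhere in this log. When a
      // region is already flushing, the per-region step is retried, which appears to
      // be what the repeated pid=139 dispatches above correspond to.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}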
2024-11-26T10:35:39,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:39,751 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:39,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34798 deadline: 1732617399749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:39,751 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:39,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34810 deadline: 1732617399749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:39,781 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=259 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/bb0d0a6112b14e45b63ad6ad1ac8cddf 2024-11-26T10:35:39,787 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/93360feb494040948e91eddbd22332fe is 50, key is test_row_0/B:col10/1732617339370/Put/seqid=0 2024-11-26T10:35:39,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742302_1478 (size=12251) 2024-11-26T10:35:39,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-26T10:35:39,894 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:39,894 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-26T10:35:39,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:39,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. as already flushing 2024-11-26T10:35:39,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 
2024-11-26T10:35:39,894 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:39,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:39,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:40,046 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:40,046 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-26T10:35:40,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:40,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. as already flushing 2024-11-26T10:35:40,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:40,047 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:40,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:40,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:40,053 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:40,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34798 deadline: 1732617400053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:40,057 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:40,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34810 deadline: 1732617400054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:40,190 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=259 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/93360feb494040948e91eddbd22332fe 2024-11-26T10:35:40,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-26T10:35:40,194 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/f69be982e0634444b36a32d4e8f9c765 is 50, key is test_row_0/C:col10/1732617339370/Put/seqid=0 2024-11-26T10:35:40,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742303_1479 (size=12251) 2024-11-26T10:35:40,198 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:40,198 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-26T10:35:40,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:40,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. as already flushing 2024-11-26T10:35:40,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 
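[Editor's illustration] The repeated "Over memstore limit=512.0 K" RegionTooBusyException warnings above come from HRegion.checkResources() blocking writes once a region's memstore grows past the flush size multiplied by the block multiplier. The sketch below shows the standard HBase settings that control this limit; the concrete values are assumptions chosen so their product matches the 512 KB limit in this log, not values read from the test's actual configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitConfig {
  // Returns a configuration with a deliberately small memstore blocking limit.
  public static Configuration smallMemstoreConf() {
    Configuration conf = HBaseConfiguration.create();
    // Flush a region memstore once it reaches 128 KB (assumed value for illustration).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // Block further writes (RegionTooBusyException) at 4x the flush size, i.e. 512 KB.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    return conf;
  }
}

Clients that hit this limit normally just see the write retried by the HBase client until the flush frees memstore space, which is consistent with the Mutate calls above failing with a deadline and then succeeding later.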
2024-11-26T10:35:40,199 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:40,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:40,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:40,350 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:40,351 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-26T10:35:40,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:40,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. as already flushing 2024-11-26T10:35:40,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:40,351 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:40,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:40,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:40,503 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:40,503 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-26T10:35:40,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:40,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. as already flushing 2024-11-26T10:35:40,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:40,503 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:40,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:40,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:40,561 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:40,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34798 deadline: 1732617400558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:40,566 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:40,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34810 deadline: 1732617400562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:40,598 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=259 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/f69be982e0634444b36a32d4e8f9c765 2024-11-26T10:35:40,601 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/bb0d0a6112b14e45b63ad6ad1ac8cddf as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/bb0d0a6112b14e45b63ad6ad1ac8cddf 2024-11-26T10:35:40,604 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/bb0d0a6112b14e45b63ad6ad1ac8cddf, entries=250, sequenceid=259, filesize=16.7 K 2024-11-26T10:35:40,604 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/93360feb494040948e91eddbd22332fe as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/93360feb494040948e91eddbd22332fe 2024-11-26T10:35:40,607 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/93360feb494040948e91eddbd22332fe, entries=150, sequenceid=259, filesize=12.0 K 2024-11-26T10:35:40,608 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/f69be982e0634444b36a32d4e8f9c765 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/f69be982e0634444b36a32d4e8f9c765 2024-11-26T10:35:40,610 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/f69be982e0634444b36a32d4e8f9c765, entries=150, sequenceid=259, filesize=12.0 K 2024-11-26T10:35:40,611 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 4d8f0b5b9c9359e0eccc71ea40315b28 in 1236ms, sequenceid=259, compaction requested=true 2024-11-26T10:35:40,611 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4d8f0b5b9c9359e0eccc71ea40315b28: 2024-11-26T10:35:40,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4d8f0b5b9c9359e0eccc71ea40315b28:A, priority=-2147483648, current under compaction store size is 1 2024-11-26T10:35:40,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:35:40,611 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:35:40,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4d8f0b5b9c9359e0eccc71ea40315b28:B, priority=-2147483648, current under compaction store size is 2 2024-11-26T10:35:40,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:35:40,611 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:35:40,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4d8f0b5b9c9359e0eccc71ea40315b28:C, priority=-2147483648, current under compaction store size is 3 2024-11-26T10:35:40,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:35:40,612 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 41929 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:35:40,612 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:35:40,612 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): 4d8f0b5b9c9359e0eccc71ea40315b28/A is initiating minor compaction (all files) 2024-11-26T10:35:40,612 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): 4d8f0b5b9c9359e0eccc71ea40315b28/B is initiating minor compaction (all files) 2024-11-26T10:35:40,612 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4d8f0b5b9c9359e0eccc71ea40315b28/B in TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 
2024-11-26T10:35:40,612 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4d8f0b5b9c9359e0eccc71ea40315b28/A in TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:40,612 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/eb26aca1b2dd466194d9fd3a16294ed1, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/0705aa953bad45508a3fca0394237712, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/93360feb494040948e91eddbd22332fe] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp, totalSize=36.2 K 2024-11-26T10:35:40,612 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/4ee041b569de401594935439522a85fc, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/bab5891457f34db191d0e1b3f19922d6, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/bb0d0a6112b14e45b63ad6ad1ac8cddf] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp, totalSize=40.9 K 2024-11-26T10:35:40,612 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting eb26aca1b2dd466194d9fd3a16294ed1, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=223, earliestPutTs=1732617335950 2024-11-26T10:35:40,612 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4ee041b569de401594935439522a85fc, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=223, earliestPutTs=1732617335950 2024-11-26T10:35:40,612 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 0705aa953bad45508a3fca0394237712, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1732617337219 2024-11-26T10:35:40,612 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting bab5891457f34db191d0e1b3f19922d6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1732617337219 2024-11-26T10:35:40,612 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 93360feb494040948e91eddbd22332fe, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1732617339370 2024-11-26T10:35:40,612 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting bb0d0a6112b14e45b63ad6ad1ac8cddf, keycount=250, bloomtype=ROW, size=16.7 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1732617339368 
2024-11-26T10:35:40,617 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4d8f0b5b9c9359e0eccc71ea40315b28#A#compaction#410 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:35:40,617 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4d8f0b5b9c9359e0eccc71ea40315b28#B#compaction#411 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:35:40,617 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/af61e24055b14450b9ea699cb9a81799 is 50, key is test_row_0/A:col10/1732617339370/Put/seqid=0 2024-11-26T10:35:40,617 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/65de02e238be4906addd9dace5e115c5 is 50, key is test_row_0/B:col10/1732617339370/Put/seqid=0 2024-11-26T10:35:40,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742304_1480 (size=12899) 2024-11-26T10:35:40,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742305_1481 (size=12899) 2024-11-26T10:35:40,655 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:40,655 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-26T10:35:40,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 
2024-11-26T10:35:40,656 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2837): Flushing 4d8f0b5b9c9359e0eccc71ea40315b28 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-26T10:35:40,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4d8f0b5b9c9359e0eccc71ea40315b28, store=A 2024-11-26T10:35:40,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:40,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4d8f0b5b9c9359e0eccc71ea40315b28, store=B 2024-11-26T10:35:40,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:40,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4d8f0b5b9c9359e0eccc71ea40315b28, store=C 2024-11-26T10:35:40,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:40,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/8eb0f4f5be5d4fde90aab93c472f151f is 50, key is test_row_0/A:col10/1732617339437/Put/seqid=0 2024-11-26T10:35:40,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742306_1482 (size=12301) 2024-11-26T10:35:40,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-26T10:35:41,026 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/af61e24055b14450b9ea699cb9a81799 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/af61e24055b14450b9ea699cb9a81799 2024-11-26T10:35:41,026 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/65de02e238be4906addd9dace5e115c5 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/65de02e238be4906addd9dace5e115c5 2024-11-26T10:35:41,031 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4d8f0b5b9c9359e0eccc71ea40315b28/B of 4d8f0b5b9c9359e0eccc71ea40315b28 into 
65de02e238be4906addd9dace5e115c5(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:35:41,031 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4d8f0b5b9c9359e0eccc71ea40315b28: 2024-11-26T10:35:41,031 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28., storeName=4d8f0b5b9c9359e0eccc71ea40315b28/B, priority=13, startTime=1732617340611; duration=0sec 2024-11-26T10:35:41,031 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:35:41,031 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4d8f0b5b9c9359e0eccc71ea40315b28:B 2024-11-26T10:35:41,031 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:35:41,031 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4d8f0b5b9c9359e0eccc71ea40315b28/A of 4d8f0b5b9c9359e0eccc71ea40315b28 into af61e24055b14450b9ea699cb9a81799(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:35:41,031 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4d8f0b5b9c9359e0eccc71ea40315b28: 2024-11-26T10:35:41,031 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28., storeName=4d8f0b5b9c9359e0eccc71ea40315b28/A, priority=13, startTime=1732617340611; duration=0sec 2024-11-26T10:35:41,031 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:35:41,031 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4d8f0b5b9c9359e0eccc71ea40315b28:A 2024-11-26T10:35:41,032 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:35:41,032 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): 4d8f0b5b9c9359e0eccc71ea40315b28/C is initiating minor compaction (all files) 2024-11-26T10:35:41,032 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4d8f0b5b9c9359e0eccc71ea40315b28/C in TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 
2024-11-26T10:35:41,032 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/b956a72ef3aa4c0580d1568e97bca483, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/098342455a254f3ca70912382b7c1327, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/f69be982e0634444b36a32d4e8f9c765] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp, totalSize=36.2 K 2024-11-26T10:35:41,033 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting b956a72ef3aa4c0580d1568e97bca483, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=223, earliestPutTs=1732617335950 2024-11-26T10:35:41,033 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 098342455a254f3ca70912382b7c1327, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1732617337219 2024-11-26T10:35:41,033 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting f69be982e0634444b36a32d4e8f9c765, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1732617339370 2024-11-26T10:35:41,039 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4d8f0b5b9c9359e0eccc71ea40315b28#C#compaction#413 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:35:41,040 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/18b94c79479b4b92999fc713797b9295 is 50, key is test_row_0/C:col10/1732617339370/Put/seqid=0 2024-11-26T10:35:41,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742307_1483 (size=12899) 2024-11-26T10:35:41,062 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=284 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/8eb0f4f5be5d4fde90aab93c472f151f 2024-11-26T10:35:41,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/86e4944f5ef648558932d085bf72e8ea is 50, key is test_row_0/B:col10/1732617339437/Put/seqid=0 2024-11-26T10:35:41,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742308_1484 (size=12301) 2024-11-26T10:35:41,446 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/18b94c79479b4b92999fc713797b9295 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/18b94c79479b4b92999fc713797b9295 2024-11-26T10:35:41,450 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4d8f0b5b9c9359e0eccc71ea40315b28/C of 4d8f0b5b9c9359e0eccc71ea40315b28 into 18b94c79479b4b92999fc713797b9295(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-26T10:35:41,450 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4d8f0b5b9c9359e0eccc71ea40315b28: 2024-11-26T10:35:41,450 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28., storeName=4d8f0b5b9c9359e0eccc71ea40315b28/C, priority=13, startTime=1732617340611; duration=0sec 2024-11-26T10:35:41,450 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:35:41,450 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4d8f0b5b9c9359e0eccc71ea40315b28:C 2024-11-26T10:35:41,475 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=284 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/86e4944f5ef648558932d085bf72e8ea 2024-11-26T10:35:41,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/75d24ed24e4d4752a34202eb09f7e2e4 is 50, key is test_row_0/C:col10/1732617339437/Put/seqid=0 2024-11-26T10:35:41,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742309_1485 (size=12301) 2024-11-26T10:35:41,577 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. as already flushing 2024-11-26T10:35:41,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 4d8f0b5b9c9359e0eccc71ea40315b28 2024-11-26T10:35:41,594 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:41,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34798 deadline: 1732617401589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:41,599 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:41,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34810 deadline: 1732617401594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:41,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-26T10:35:41,699 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:41,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34798 deadline: 1732617401695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:41,704 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:41,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34810 deadline: 1732617401700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:41,900 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=284 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/75d24ed24e4d4752a34202eb09f7e2e4 2024-11-26T10:35:41,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/8eb0f4f5be5d4fde90aab93c472f151f as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/8eb0f4f5be5d4fde90aab93c472f151f 2024-11-26T10:35:41,904 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:41,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34798 deadline: 1732617401900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:41,907 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/8eb0f4f5be5d4fde90aab93c472f151f, entries=150, sequenceid=284, filesize=12.0 K 2024-11-26T10:35:41,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/86e4944f5ef648558932d085bf72e8ea as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/86e4944f5ef648558932d085bf72e8ea 2024-11-26T10:35:41,909 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:41,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34810 deadline: 1732617401906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:41,910 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/86e4944f5ef648558932d085bf72e8ea, entries=150, sequenceid=284, filesize=12.0 K 2024-11-26T10:35:41,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/75d24ed24e4d4752a34202eb09f7e2e4 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/75d24ed24e4d4752a34202eb09f7e2e4 2024-11-26T10:35:41,913 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/75d24ed24e4d4752a34202eb09f7e2e4, entries=150, sequenceid=284, filesize=12.0 K 2024-11-26T10:35:41,914 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 4d8f0b5b9c9359e0eccc71ea40315b28 in 1259ms, sequenceid=284, compaction requested=false 2024-11-26T10:35:41,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2538): Flush status journal for 4d8f0b5b9c9359e0eccc71ea40315b28: 2024-11-26T10:35:41,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 
2024-11-26T10:35:41,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139 2024-11-26T10:35:41,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4106): Remote procedure done, pid=139 2024-11-26T10:35:41,918 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=138 2024-11-26T10:35:41,918 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3250 sec 2024-11-26T10:35:41,920 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees in 2.3290 sec 2024-11-26T10:35:42,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 4d8f0b5b9c9359e0eccc71ea40315b28 2024-11-26T10:35:42,209 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4d8f0b5b9c9359e0eccc71ea40315b28 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-26T10:35:42,209 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4d8f0b5b9c9359e0eccc71ea40315b28, store=A 2024-11-26T10:35:42,210 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:42,210 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4d8f0b5b9c9359e0eccc71ea40315b28, store=B 2024-11-26T10:35:42,210 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:42,210 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4d8f0b5b9c9359e0eccc71ea40315b28, store=C 2024-11-26T10:35:42,210 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:42,262 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/a877b396d67646ba83de900afc0522ff is 50, key is test_row_0/A:col10/1732617341593/Put/seqid=0 2024-11-26T10:35:42,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742310_1486 (size=14741) 2024-11-26T10:35:42,284 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:42,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34810 deadline: 1732617402279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:42,285 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:42,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34798 deadline: 1732617402280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:42,389 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:42,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34810 deadline: 1732617402385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:42,389 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:42,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34798 deadline: 1732617402385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:42,593 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:42,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34810 deadline: 1732617402590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:42,593 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:42,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34798 deadline: 1732617402591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:42,666 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=299 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/a877b396d67646ba83de900afc0522ff 2024-11-26T10:35:42,671 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/1d14f1fd8af54aed9f183696cbaeab58 is 50, key is test_row_0/B:col10/1732617341593/Put/seqid=0 2024-11-26T10:35:42,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742311_1487 (size=12301) 2024-11-26T10:35:42,899 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:42,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34798 deadline: 1732617402894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:42,899 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:42,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34810 deadline: 1732617402896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:42,958 DEBUG [Thread-1920 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3652e74d to 127.0.0.1:61934 2024-11-26T10:35:42,958 DEBUG [Thread-1918 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x496fe03f to 127.0.0.1:61934 2024-11-26T10:35:42,958 DEBUG [Thread-1920 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:35:42,958 DEBUG [Thread-1918 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:35:42,958 DEBUG [Thread-1914 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3d672ed2 to 127.0.0.1:61934 2024-11-26T10:35:42,958 DEBUG [Thread-1914 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:35:42,962 DEBUG [Thread-1916 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7cf40102 to 127.0.0.1:61934 2024-11-26T10:35:42,962 DEBUG [Thread-1922 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2405c04e to 127.0.0.1:61934 2024-11-26T10:35:42,963 DEBUG [Thread-1922 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:35:42,963 DEBUG [Thread-1916 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:35:43,075 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=299 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/1d14f1fd8af54aed9f183696cbaeab58 2024-11-26T10:35:43,087 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/b25e12a431b442e1a6a83642589c6ed4 is 50, key is test_row_0/C:col10/1732617341593/Put/seqid=0 2024-11-26T10:35:43,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742312_1488 (size=12301) 2024-11-26T10:35:43,402 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:43,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34798 deadline: 1732617403401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:43,406 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:43,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34810 deadline: 1732617403405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:43,493 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=299 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/b25e12a431b442e1a6a83642589c6ed4 2024-11-26T10:35:43,505 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/a877b396d67646ba83de900afc0522ff as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/a877b396d67646ba83de900afc0522ff 2024-11-26T10:35:43,511 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/a877b396d67646ba83de900afc0522ff, entries=200, sequenceid=299, filesize=14.4 K 2024-11-26T10:35:43,511 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/1d14f1fd8af54aed9f183696cbaeab58 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/1d14f1fd8af54aed9f183696cbaeab58 2024-11-26T10:35:43,514 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/1d14f1fd8af54aed9f183696cbaeab58, entries=150, sequenceid=299, filesize=12.0 K 2024-11-26T10:35:43,515 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/b25e12a431b442e1a6a83642589c6ed4 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/b25e12a431b442e1a6a83642589c6ed4 2024-11-26T10:35:43,518 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/b25e12a431b442e1a6a83642589c6ed4, entries=150, sequenceid=299, filesize=12.0 K 2024-11-26T10:35:43,518 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 4d8f0b5b9c9359e0eccc71ea40315b28 in 1309ms, sequenceid=299, compaction requested=true 2024-11-26T10:35:43,519 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4d8f0b5b9c9359e0eccc71ea40315b28: 2024-11-26T10:35:43,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4d8f0b5b9c9359e0eccc71ea40315b28:A, priority=-2147483648, current under compaction store size is 1 2024-11-26T10:35:43,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:35:43,519 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:35:43,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4d8f0b5b9c9359e0eccc71ea40315b28:B, priority=-2147483648, current under compaction store size is 2 2024-11-26T10:35:43,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:35:43,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4d8f0b5b9c9359e0eccc71ea40315b28:C, priority=-2147483648, current under compaction store size is 3 2024-11-26T10:35:43,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:35:43,519 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:35:43,519 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37501 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:35:43,519 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39941 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:35:43,519 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): 4d8f0b5b9c9359e0eccc71ea40315b28/A is initiating minor compaction (all files) 2024-11-26T10:35:43,519 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): 4d8f0b5b9c9359e0eccc71ea40315b28/B is initiating minor compaction (all files) 2024-11-26T10:35:43,519 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4d8f0b5b9c9359e0eccc71ea40315b28/B in TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 
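
The Mutate calls rejected above with RegionTooBusyException ("Over memstore limit=512.0 K") are the region server applying write backpressure while MemStoreFlusher catches up. The standard HBase client retries these internally and may eventually surface them wrapped in a retries-exhausted exception; the sketch below only makes that backoff pattern explicit for illustration. The class and method names (PutWithBackoff, putWithBackoff, MAX_RETRIES) are hypothetical, not part of the HBase API.

    import java.io.IOException;

    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;

    // Hypothetical helper: retry a Put with exponential backoff when the region
    // reports it is over its memstore limit, as in the log entries above.
    final class PutWithBackoff {
      private static final int MAX_RETRIES = 5;

      static void putWithBackoff(Table table, Put put) throws IOException, InterruptedException {
        long sleepMs = 100;
        for (int attempt = 1; ; attempt++) {
          try {
            table.put(put);                       // normal write path
            return;
          } catch (RegionTooBusyException e) {
            if (attempt >= MAX_RETRIES) {
              throw e;                            // give up; surface the backpressure error
            }
            Thread.sleep(sleepMs);                // wait for the memstore flush to make room
            sleepMs *= 2;
          }
        }
      }
    }
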
2024-11-26T10:35:43,519 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4d8f0b5b9c9359e0eccc71ea40315b28/A in TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:43,519 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/65de02e238be4906addd9dace5e115c5, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/86e4944f5ef648558932d085bf72e8ea, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/1d14f1fd8af54aed9f183696cbaeab58] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp, totalSize=36.6 K 2024-11-26T10:35:43,519 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/af61e24055b14450b9ea699cb9a81799, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/8eb0f4f5be5d4fde90aab93c472f151f, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/a877b396d67646ba83de900afc0522ff] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp, totalSize=39.0 K 2024-11-26T10:35:43,520 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 65de02e238be4906addd9dace5e115c5, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1732617339370 2024-11-26T10:35:43,520 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting af61e24055b14450b9ea699cb9a81799, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1732617339370 2024-11-26T10:35:43,520 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 86e4944f5ef648558932d085bf72e8ea, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=284, earliestPutTs=1732617339431 2024-11-26T10:35:43,520 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8eb0f4f5be5d4fde90aab93c472f151f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=284, earliestPutTs=1732617339431 2024-11-26T10:35:43,520 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 1d14f1fd8af54aed9f183696cbaeab58, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1732617341584 2024-11-26T10:35:43,520 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting a877b396d67646ba83de900afc0522ff, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1732617341584 
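
Each Compactor(224) entry above records an input file's key count, bloom filter type (bloomtype=ROW), size, and sequence-id range. A minimal sketch of declaring column families with that ROW bloom filter at table-creation time follows; the table and family names mirror the test, everything else is generic boilerplate rather than the test tool's actual setup code.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    // Sketch: create a table whose families use ROW bloom filters, matching the
    // "bloomtype=ROW" recorded for every store file in the compaction log above.
    final class CreateTableSketch {
      static void create(Admin admin) throws java.io.IOException {
        TableDescriptorBuilder table =
            TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
        for (String family : new String[] {"A", "B", "C"}) {
          table.setColumnFamily(
              ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                  .setBloomFilterType(BloomType.ROW)   // row-level bloom filter
                  .build());
        }
        admin.createTable(table.build());
      }
    }
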
2024-11-26T10:35:43,525 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4d8f0b5b9c9359e0eccc71ea40315b28#B#compaction#420 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:35:43,525 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4d8f0b5b9c9359e0eccc71ea40315b28#A#compaction#419 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:35:43,526 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/237238b0c20c4615975ef18b0b5e3f11 is 50, key is test_row_0/B:col10/1732617341593/Put/seqid=0 2024-11-26T10:35:43,526 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/49463f2fdde6423b86f983ce8b562635 is 50, key is test_row_0/A:col10/1732617341593/Put/seqid=0 2024-11-26T10:35:43,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742313_1489 (size=13051) 2024-11-26T10:35:43,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742314_1490 (size=13051) 2024-11-26T10:35:43,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-26T10:35:43,695 INFO [Thread-1913 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 138 completed 2024-11-26T10:35:43,938 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/49463f2fdde6423b86f983ce8b562635 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/49463f2fdde6423b86f983ce8b562635 2024-11-26T10:35:43,939 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/237238b0c20c4615975ef18b0b5e3f11 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/237238b0c20c4615975ef18b0b5e3f11 2024-11-26T10:35:43,942 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4d8f0b5b9c9359e0eccc71ea40315b28/B of 4d8f0b5b9c9359e0eccc71ea40315b28 into 237238b0c20c4615975ef18b0b5e3f11(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
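
The HBaseAdmin$TableFuture entry above ("Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 138 completed") is the client-side acknowledgement of an explicit flush request against the test table. A minimal sketch of issuing such a request, assuming the configuration on the classpath points at the test cluster:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Sketch: ask the master to flush the test table; the master runs a flush
    // procedure (pid 138 in the log above) and the call returns once it completes.
    final class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }
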
2024-11-26T10:35:43,942 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4d8f0b5b9c9359e0eccc71ea40315b28/A of 4d8f0b5b9c9359e0eccc71ea40315b28 into 49463f2fdde6423b86f983ce8b562635(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:35:43,942 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4d8f0b5b9c9359e0eccc71ea40315b28: 2024-11-26T10:35:43,942 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4d8f0b5b9c9359e0eccc71ea40315b28: 2024-11-26T10:35:43,942 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28., storeName=4d8f0b5b9c9359e0eccc71ea40315b28/A, priority=13, startTime=1732617343519; duration=0sec 2024-11-26T10:35:43,942 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28., storeName=4d8f0b5b9c9359e0eccc71ea40315b28/B, priority=13, startTime=1732617343519; duration=0sec 2024-11-26T10:35:43,942 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:35:43,942 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:35:43,942 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4d8f0b5b9c9359e0eccc71ea40315b28:B 2024-11-26T10:35:43,942 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4d8f0b5b9c9359e0eccc71ea40315b28:A 2024-11-26T10:35:43,942 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:35:43,943 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37501 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:35:43,943 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): 4d8f0b5b9c9359e0eccc71ea40315b28/C is initiating minor compaction (all files) 2024-11-26T10:35:43,943 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 4d8f0b5b9c9359e0eccc71ea40315b28/C in TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 
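
The ExploringCompactionPolicy(116) entries above ("selected 3 files of size 39941 ... with 1 in ratio") refer to the ratio test applied to each candidate permutation: no file may be larger than the configured ratio times the combined size of the other files in the selection. The sketch below is a simplified illustration of that check, not the actual HBase implementation; the 1.2 value mirrors the default compaction ratio.

    import java.util.List;

    // Simplified illustration (not HBase source) of the "files in ratio" test used
    // when exploring candidate compaction selections, as logged above.
    final class FilesInRatioSketch {
      static boolean filesInRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
          // Reject the selection if any single file dominates the rest of the set.
          if (size > (total - size) * ratio) {
            return false;
          }
        }
        return true;
      }

      public static void main(String[] args) {
        // Sizes approximating the three A-store files selected above (39941 bytes total).
        System.out.println(filesInRatio(List.of(12_902L, 12_301L, 14_738L), 1.2));  // true
      }
    }
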
2024-11-26T10:35:43,943 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/18b94c79479b4b92999fc713797b9295, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/75d24ed24e4d4752a34202eb09f7e2e4, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/b25e12a431b442e1a6a83642589c6ed4] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp, totalSize=36.6 K 2024-11-26T10:35:43,943 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 18b94c79479b4b92999fc713797b9295, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1732617339370 2024-11-26T10:35:43,943 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 75d24ed24e4d4752a34202eb09f7e2e4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=284, earliestPutTs=1732617339431 2024-11-26T10:35:43,943 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting b25e12a431b442e1a6a83642589c6ed4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1732617341584 2024-11-26T10:35:43,949 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4d8f0b5b9c9359e0eccc71ea40315b28#C#compaction#421 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:35:43,949 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/1197a3cb426e4af4943e8ed8ca545fa0 is 50, key is test_row_0/C:col10/1732617341593/Put/seqid=0 2024-11-26T10:35:43,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742315_1491 (size=13051) 2024-11-26T10:35:44,364 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/1197a3cb426e4af4943e8ed8ca545fa0 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/1197a3cb426e4af4943e8ed8ca545fa0 2024-11-26T10:35:44,369 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 4d8f0b5b9c9359e0eccc71ea40315b28/C of 4d8f0b5b9c9359e0eccc71ea40315b28 into 1197a3cb426e4af4943e8ed8ca545fa0(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
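
The PressureAwareThroughputController(145) entries report how fast each compaction wrote and how often it slept to stay under the shared limit (50.00 MB/second here). The sketch below is an illustrative rate limiter of the same flavour, not the HBase controller itself: a writer calls control() after each chunk and sleeps whenever it is ahead of the allowed rate.

    import java.util.concurrent.TimeUnit;

    // Illustrative write-rate limiter (not HBase source): sleep whenever the bytes
    // written so far would exceed the configured limit for the elapsed time,
    // mirroring the "slept N time(s)" accounting in the log above.
    final class ThroughputLimiterSketch {
      private final double limitBytesPerSecond;
      private final long startNanos = System.nanoTime();
      private long bytesWritten;

      ThroughputLimiterSketch(double limitBytesPerSecond) {
        this.limitBytesPerSecond = limitBytesPerSecond;
      }

      void control(long newBytes) throws InterruptedException {
        bytesWritten += newBytes;
        double elapsedSeconds = (System.nanoTime() - startNanos) / 1e9;
        double earliestAllowedSeconds = bytesWritten / limitBytesPerSecond;
        long sleepMillis = (long) ((earliestAllowedSeconds - elapsedSeconds) * 1000);
        if (sleepMillis > 0) {
          TimeUnit.MILLISECONDS.sleep(sleepMillis);  // throttle until back under the limit
        }
      }
    }
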
2024-11-26T10:35:44,369 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 4d8f0b5b9c9359e0eccc71ea40315b28: 2024-11-26T10:35:44,369 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28., storeName=4d8f0b5b9c9359e0eccc71ea40315b28/C, priority=13, startTime=1732617343519; duration=0sec 2024-11-26T10:35:44,369 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:35:44,369 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4d8f0b5b9c9359e0eccc71ea40315b28:C 2024-11-26T10:35:44,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on 4d8f0b5b9c9359e0eccc71ea40315b28 2024-11-26T10:35:44,414 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4d8f0b5b9c9359e0eccc71ea40315b28 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-26T10:35:44,415 DEBUG [Thread-1907 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0de9f076 to 127.0.0.1:61934 2024-11-26T10:35:44,415 DEBUG [Thread-1907 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:35:44,415 DEBUG [Thread-1905 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2f142b04 to 127.0.0.1:61934 2024-11-26T10:35:44,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4d8f0b5b9c9359e0eccc71ea40315b28, store=A 2024-11-26T10:35:44,415 DEBUG [Thread-1905 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:35:44,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:44,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4d8f0b5b9c9359e0eccc71ea40315b28, store=B 2024-11-26T10:35:44,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:44,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4d8f0b5b9c9359e0eccc71ea40315b28, store=C 2024-11-26T10:35:44,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:44,421 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/87f44dd3ec704ea1942655e22cf9378b is 50, key is test_row_0/A:col10/1732617344412/Put/seqid=0 2024-11-26T10:35:44,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742316_1492 (size=12301) 2024-11-26T10:35:44,827 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=328 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/87f44dd3ec704ea1942655e22cf9378b 2024-11-26T10:35:44,842 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/ba65937d49ef429686e65a8994ed2942 is 50, key is test_row_0/B:col10/1732617344412/Put/seqid=0 2024-11-26T10:35:44,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742317_1493 (size=12301) 2024-11-26T10:35:45,247 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=328 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/ba65937d49ef429686e65a8994ed2942 2024-11-26T10:35:45,261 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/1e85145e3e6a4933824753f23c0d5f34 is 50, key is test_row_0/C:col10/1732617344412/Put/seqid=0 2024-11-26T10:35:45,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742318_1494 (size=12301) 2024-11-26T10:35:45,666 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=328 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/1e85145e3e6a4933824753f23c0d5f34 2024-11-26T10:35:45,676 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/87f44dd3ec704ea1942655e22cf9378b as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/87f44dd3ec704ea1942655e22cf9378b 2024-11-26T10:35:45,680 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/87f44dd3ec704ea1942655e22cf9378b, entries=150, sequenceid=328, filesize=12.0 K 2024-11-26T10:35:45,681 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/ba65937d49ef429686e65a8994ed2942 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/ba65937d49ef429686e65a8994ed2942 2024-11-26T10:35:45,683 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/ba65937d49ef429686e65a8994ed2942, entries=150, sequenceid=328, filesize=12.0 K 2024-11-26T10:35:45,684 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/1e85145e3e6a4933824753f23c0d5f34 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/1e85145e3e6a4933824753f23c0d5f34 2024-11-26T10:35:45,686 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/1e85145e3e6a4933824753f23c0d5f34, entries=150, sequenceid=328, filesize=12.0 K 2024-11-26T10:35:45,687 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=0 B/0 for 4d8f0b5b9c9359e0eccc71ea40315b28 in 1273ms, sequenceid=328, compaction requested=false 2024-11-26T10:35:45,687 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4d8f0b5b9c9359e0eccc71ea40315b28: 2024-11-26T10:35:47,262 DEBUG [Thread-1903 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x79b10416 to 127.0.0.1:61934 2024-11-26T10:35:47,262 DEBUG [Thread-1903 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:35:47,266 DEBUG [Thread-1911 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7ed69825 to 127.0.0.1:61934 2024-11-26T10:35:47,266 DEBUG [Thread-1911 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:35:47,288 DEBUG [Thread-1909 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4414259d to 127.0.0.1:61934 2024-11-26T10:35:47,288 DEBUG [Thread-1909 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:35:47,289 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-26T10:35:47,289 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 25 2024-11-26T10:35:47,289 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 54 2024-11-26T10:35:47,289 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 128 2024-11-26T10:35:47,289 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 26 2024-11-26T10:35:47,289 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 25 2024-11-26T10:35:47,289 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-26T10:35:47,289 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-26T10:35:47,289 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2868 2024-11-26T10:35:47,289 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8601 rows 2024-11-26T10:35:47,289 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2865 2024-11-26T10:35:47,289 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8595 rows 2024-11-26T10:35:47,289 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2857 2024-11-26T10:35:47,289 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8571 rows 2024-11-26T10:35:47,289 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2856 2024-11-26T10:35:47,289 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8568 rows 2024-11-26T10:35:47,289 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2857 2024-11-26T10:35:47,289 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8570 rows 2024-11-26T10:35:47,289 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-26T10:35:47,289 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4c60eb7d to 127.0.0.1:61934 2024-11-26T10:35:47,289 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:35:47,293 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-26T10:35:47,294 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-26T10:35:47,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=140, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-26T10:35:47,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-26T10:35:47,297 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732617347297"}]},"ts":"1732617347297"} 2024-11-26T10:35:47,298 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-26T10:35:47,351 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-26T10:35:47,353 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-26T10:35:47,355 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=142, ppid=141, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=4d8f0b5b9c9359e0eccc71ea40315b28, UNASSIGN}] 2024-11-26T10:35:47,356 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=142, ppid=141, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=4d8f0b5b9c9359e0eccc71ea40315b28, UNASSIGN 2024-11-26T10:35:47,357 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=142 updating hbase:meta row=4d8f0b5b9c9359e0eccc71ea40315b28, regionState=CLOSING, regionLocation=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:47,358 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-26T10:35:47,358 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=143, ppid=142, state=RUNNABLE; CloseRegionProcedure 4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877}] 2024-11-26T10:35:47,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-26T10:35:47,510 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:47,511 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] handler.UnassignRegionHandler(124): Close 4d8f0b5b9c9359e0eccc71ea40315b28 2024-11-26T10:35:47,512 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-26T10:35:47,512 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegion(1681): Closing 4d8f0b5b9c9359e0eccc71ea40315b28, disabling compactions & flushes 2024-11-26T10:35:47,512 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:47,512 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 2024-11-26T10:35:47,512 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. after waiting 0 ms 2024-11-26T10:35:47,512 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 
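
The procedure chain above (DisableTableProcedure pid=140 → CloseTableRegionsProcedure pid=141 → TransitRegionStateProcedure pid=142 → CloseRegionProcedure pid=143) is the master tearing the table down region by region after the test. The client call that starts it is a single Admin request; a minimal sketch follows, with the delete step included only as the usual follow-up once the table is disabled, which this log excerpt itself does not show.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    // Sketch: disable (and then drop) the test table, which is what triggers the
    // DisableTableProcedure / CloseRegionProcedure chain seen in the log above.
    final class DropTableSketch {
      static void drop(Admin admin) throws java.io.IOException {
        TableName name = TableName.valueOf("TestAcidGuarantees");
        if (admin.isTableEnabled(name)) {
          admin.disableTable(name);   // master runs DisableTableProcedure and waits for it
        }
        admin.deleteTable(name);      // only legal once the table is disabled
      }
    }
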
2024-11-26T10:35:47,513 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegion(2837): Flushing 4d8f0b5b9c9359e0eccc71ea40315b28 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-11-26T10:35:47,513 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4d8f0b5b9c9359e0eccc71ea40315b28, store=A 2024-11-26T10:35:47,513 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:47,513 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4d8f0b5b9c9359e0eccc71ea40315b28, store=B 2024-11-26T10:35:47,514 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:47,514 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 4d8f0b5b9c9359e0eccc71ea40315b28, store=C 2024-11-26T10:35:47,514 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:47,521 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/6620213c9ac242c9aaf3e737b8676be1 is 50, key is test_row_0/A:col10/1732617347263/Put/seqid=0 2024-11-26T10:35:47,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742319_1495 (size=9857) 2024-11-26T10:35:47,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-26T10:35:47,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-26T10:35:47,929 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=334 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/6620213c9ac242c9aaf3e737b8676be1 2024-11-26T10:35:47,941 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/1cd3f7009de04c5ea51e2268ed19a9db is 50, key is test_row_0/B:col10/1732617347263/Put/seqid=0 2024-11-26T10:35:47,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742320_1496 (size=9857) 2024-11-26T10:35:48,346 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 
{event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=334 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/1cd3f7009de04c5ea51e2268ed19a9db 2024-11-26T10:35:48,358 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/ed07fed98d4e48e78cd894d4f79ffbbd is 50, key is test_row_0/C:col10/1732617347263/Put/seqid=0 2024-11-26T10:35:48,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742321_1497 (size=9857) 2024-11-26T10:35:48,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-26T10:35:48,764 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=334 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/ed07fed98d4e48e78cd894d4f79ffbbd 2024-11-26T10:35:48,775 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/A/6620213c9ac242c9aaf3e737b8676be1 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/6620213c9ac242c9aaf3e737b8676be1 2024-11-26T10:35:48,779 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/6620213c9ac242c9aaf3e737b8676be1, entries=100, sequenceid=334, filesize=9.6 K 2024-11-26T10:35:48,780 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/B/1cd3f7009de04c5ea51e2268ed19a9db as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/1cd3f7009de04c5ea51e2268ed19a9db 2024-11-26T10:35:48,783 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/1cd3f7009de04c5ea51e2268ed19a9db, entries=100, sequenceid=334, filesize=9.6 K 2024-11-26T10:35:48,784 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/.tmp/C/ed07fed98d4e48e78cd894d4f79ffbbd as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/ed07fed98d4e48e78cd894d4f79ffbbd 2024-11-26T10:35:48,788 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/ed07fed98d4e48e78cd894d4f79ffbbd, entries=100, sequenceid=334, filesize=9.6 K 2024-11-26T10:35:48,789 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=0 B/0 for 4d8f0b5b9c9359e0eccc71ea40315b28 in 1276ms, sequenceid=334, compaction requested=true 2024-11-26T10:35:48,789 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/153fcf6dff8f433c90bd5e7ca8387ea3, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/fcc37984f0c94c45ade74a22ef53ab55, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/dbd09fae26464be6ac476274fa248e4a, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/78558302b45a444fbe06360463c74c03, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/1bfc8759d24e4d559a274f116736a3a6, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/0f6c41413bff42669900425c9a9e3192, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/6f310621b4d0417ba3bae26affb1121c, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/863ba5a171fc4301b054e439e749f093, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/5df7a51c904b409bb992bcb9c2f672c5, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/04104f12534b4b68b870c9b3a7bde2cb, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/a86e25eb01884916b50afce6f148a47e, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/a616f4c4619c47348c9627a29b9ab793, 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/2438690c48dc48e0a09fe439a1ffba32, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/c4c452440c814e4986859280015171a3, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/6007163654994c56b504d7579e388727, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/c98eb7f7eb8f4bdf957d147aeda74162, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/4ee041b569de401594935439522a85fc, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/bab5891457f34db191d0e1b3f19922d6, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/bb0d0a6112b14e45b63ad6ad1ac8cddf, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/af61e24055b14450b9ea699cb9a81799, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/8eb0f4f5be5d4fde90aab93c472f151f, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/a877b396d67646ba83de900afc0522ff] to archive 2024-11-26T10:35:48,790 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
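
On close, the StoreCloser hands every replaced store file to HFileArchiver, which relocates it rather than deleting it: each path under .../data/default/TestAcidGuarantees/... reappears under .../archive/data/default/TestAcidGuarantees/..., as the "Archived from FileableStoreFile" entries below show. A small hypothetical helper (not the HFileArchiver API) that derives the archive location the same way:

    // Hypothetical helper mirroring the data -> archive path mapping visible in the
    // HFileArchiver log entries below.
    final class ArchivePathSketch {
      static String toArchivePath(String rootDir, String storeFilePath) {
        if (!storeFilePath.startsWith(rootDir)) {
          throw new IllegalArgumentException("store file is not under the HBase root dir");
        }
        // <root>/data/default/TestAcidGuarantees/<region>/A/<file>
        //   -> <root>/archive/data/default/TestAcidGuarantees/<region>/A/<file>
        return rootDir + "/archive" + storeFilePath.substring(rootDir.length());
      }

      public static void main(String[] args) {
        String root = "hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1";
        String file = root + "/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/153fcf6dff8f433c90bd5e7ca8387ea3";
        System.out.println(toArchivePath(root, file));
      }
    }
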
2024-11-26T10:35:48,792 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/153fcf6dff8f433c90bd5e7ca8387ea3 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/153fcf6dff8f433c90bd5e7ca8387ea3 2024-11-26T10:35:48,794 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/fcc37984f0c94c45ade74a22ef53ab55 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/fcc37984f0c94c45ade74a22ef53ab55 2024-11-26T10:35:48,795 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/dbd09fae26464be6ac476274fa248e4a to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/dbd09fae26464be6ac476274fa248e4a 2024-11-26T10:35:48,796 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/78558302b45a444fbe06360463c74c03 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/78558302b45a444fbe06360463c74c03 2024-11-26T10:35:48,797 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/1bfc8759d24e4d559a274f116736a3a6 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/1bfc8759d24e4d559a274f116736a3a6 2024-11-26T10:35:48,799 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/0f6c41413bff42669900425c9a9e3192 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/0f6c41413bff42669900425c9a9e3192 2024-11-26T10:35:48,800 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/6f310621b4d0417ba3bae26affb1121c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/6f310621b4d0417ba3bae26affb1121c 2024-11-26T10:35:48,801 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/863ba5a171fc4301b054e439e749f093 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/863ba5a171fc4301b054e439e749f093 2024-11-26T10:35:48,802 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/5df7a51c904b409bb992bcb9c2f672c5 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/5df7a51c904b409bb992bcb9c2f672c5 2024-11-26T10:35:48,803 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/04104f12534b4b68b870c9b3a7bde2cb to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/04104f12534b4b68b870c9b3a7bde2cb 2024-11-26T10:35:48,804 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/a86e25eb01884916b50afce6f148a47e to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/a86e25eb01884916b50afce6f148a47e 2024-11-26T10:35:48,806 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/a616f4c4619c47348c9627a29b9ab793 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/a616f4c4619c47348c9627a29b9ab793 2024-11-26T10:35:48,807 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/2438690c48dc48e0a09fe439a1ffba32 to 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/2438690c48dc48e0a09fe439a1ffba32 2024-11-26T10:35:48,808 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/c4c452440c814e4986859280015171a3 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/c4c452440c814e4986859280015171a3 2024-11-26T10:35:48,809 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/6007163654994c56b504d7579e388727 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/6007163654994c56b504d7579e388727 2024-11-26T10:35:48,810 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/c98eb7f7eb8f4bdf957d147aeda74162 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/c98eb7f7eb8f4bdf957d147aeda74162 2024-11-26T10:35:48,812 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/4ee041b569de401594935439522a85fc to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/4ee041b569de401594935439522a85fc 2024-11-26T10:35:48,814 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/bab5891457f34db191d0e1b3f19922d6 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/bab5891457f34db191d0e1b3f19922d6 2024-11-26T10:35:48,816 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/bb0d0a6112b14e45b63ad6ad1ac8cddf to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/bb0d0a6112b14e45b63ad6ad1ac8cddf 2024-11-26T10:35:48,817 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/af61e24055b14450b9ea699cb9a81799 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/af61e24055b14450b9ea699cb9a81799 2024-11-26T10:35:48,819 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/8eb0f4f5be5d4fde90aab93c472f151f to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/8eb0f4f5be5d4fde90aab93c472f151f 2024-11-26T10:35:48,821 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/a877b396d67646ba83de900afc0522ff to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/a877b396d67646ba83de900afc0522ff 2024-11-26T10:35:48,822 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/9289fa94d87e4c6d8549e0eda12dd9bc, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/6f9ac860af7d4e13b10d51bdcb7de0e3, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/7ad21eba13454eb297af499091df7b98, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/31b928f44d2740ab993002d534b21a9c, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/16662a7059624af584a451b1baa2ea41, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/425d2ec3e4c74a8480e9a70b02e6d599, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/146a858013fa46f7bbe9407a76f0d094, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/6fce3b54059c4b1089e4b30c41e941e7, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/dbaf68aa56e54a7c813c25b965a79a15, 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/28787471414345f0b731e0ea5d91e865, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/b6af5af8eb774bb0b7f07e9b41ac95c9, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/c5c2a0f8b5d84a6c8652b5d9e0337bd7, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/d4b85a5c7d2a4d069ae88b865a235f30, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/02d21a33fbcd400eb24cfc21127b15c3, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/db16460f44ec4445b4c4fbdff8b38e3c, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/eb26aca1b2dd466194d9fd3a16294ed1, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/852fa31b4f574ea0ba304f11b5774d5a, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/0705aa953bad45508a3fca0394237712, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/65de02e238be4906addd9dace5e115c5, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/93360feb494040948e91eddbd22332fe, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/86e4944f5ef648558932d085bf72e8ea, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/1d14f1fd8af54aed9f183696cbaeab58] to archive 2024-11-26T10:35:48,823 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-26T10:35:48,825 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/9289fa94d87e4c6d8549e0eda12dd9bc to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/9289fa94d87e4c6d8549e0eda12dd9bc 2024-11-26T10:35:48,826 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/6f9ac860af7d4e13b10d51bdcb7de0e3 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/6f9ac860af7d4e13b10d51bdcb7de0e3 2024-11-26T10:35:48,828 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/7ad21eba13454eb297af499091df7b98 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/7ad21eba13454eb297af499091df7b98 2024-11-26T10:35:48,829 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/31b928f44d2740ab993002d534b21a9c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/31b928f44d2740ab993002d534b21a9c 2024-11-26T10:35:48,830 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/16662a7059624af584a451b1baa2ea41 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/16662a7059624af584a451b1baa2ea41 2024-11-26T10:35:48,831 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/425d2ec3e4c74a8480e9a70b02e6d599 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/425d2ec3e4c74a8480e9a70b02e6d599 2024-11-26T10:35:48,832 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/146a858013fa46f7bbe9407a76f0d094 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/146a858013fa46f7bbe9407a76f0d094 2024-11-26T10:35:48,834 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/6fce3b54059c4b1089e4b30c41e941e7 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/6fce3b54059c4b1089e4b30c41e941e7 2024-11-26T10:35:48,835 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/dbaf68aa56e54a7c813c25b965a79a15 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/dbaf68aa56e54a7c813c25b965a79a15 2024-11-26T10:35:48,836 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/28787471414345f0b731e0ea5d91e865 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/28787471414345f0b731e0ea5d91e865 2024-11-26T10:35:48,838 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/b6af5af8eb774bb0b7f07e9b41ac95c9 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/b6af5af8eb774bb0b7f07e9b41ac95c9 2024-11-26T10:35:48,839 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/c5c2a0f8b5d84a6c8652b5d9e0337bd7 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/c5c2a0f8b5d84a6c8652b5d9e0337bd7 2024-11-26T10:35:48,840 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/d4b85a5c7d2a4d069ae88b865a235f30 to 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/d4b85a5c7d2a4d069ae88b865a235f30 2024-11-26T10:35:48,841 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/02d21a33fbcd400eb24cfc21127b15c3 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/02d21a33fbcd400eb24cfc21127b15c3 2024-11-26T10:35:48,843 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/db16460f44ec4445b4c4fbdff8b38e3c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/db16460f44ec4445b4c4fbdff8b38e3c 2024-11-26T10:35:48,844 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/eb26aca1b2dd466194d9fd3a16294ed1 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/eb26aca1b2dd466194d9fd3a16294ed1 2024-11-26T10:35:48,845 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/852fa31b4f574ea0ba304f11b5774d5a to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/852fa31b4f574ea0ba304f11b5774d5a 2024-11-26T10:35:48,847 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/0705aa953bad45508a3fca0394237712 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/0705aa953bad45508a3fca0394237712 2024-11-26T10:35:48,848 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/65de02e238be4906addd9dace5e115c5 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/65de02e238be4906addd9dace5e115c5 2024-11-26T10:35:48,850 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/93360feb494040948e91eddbd22332fe to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/93360feb494040948e91eddbd22332fe 2024-11-26T10:35:48,851 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/86e4944f5ef648558932d085bf72e8ea to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/86e4944f5ef648558932d085bf72e8ea 2024-11-26T10:35:48,852 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/1d14f1fd8af54aed9f183696cbaeab58 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/1d14f1fd8af54aed9f183696cbaeab58 2024-11-26T10:35:48,853 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/61c94bfcc0994e0180f22a89a7a1a5f2, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/b886fffbb2794f6baaf75615fb626473, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/b51a2a1baa224d3888e3993faefc3290, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/f47a26c67f6b4107af4d5b33a334abd8, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/0c9d0deaed6b4d869bb93ab55a299b7d, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/b98fbb3930b748099e5c17cd3b77e1d9, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/af427ebca65b463297d991eaa2ccfbeb, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/e34137eabcf347dabfa923e980585b3b, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/021d6239126c411ca61d53062aa65a76, 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/08323784fc5c428fab316a6ca87793a9, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/2fa1c1968c154a9d8c2a606345caf5c0, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/2cbf02fd7c0b4f8ab2cefc735661a457, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/6e4fb45ae6f34987af15c413d2ae9cb9, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/972c9ca636f241fe96d5aa66f1ddec35, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/036b5bf2520145b68dfb0f227b6a10b6, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/b956a72ef3aa4c0580d1568e97bca483, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/a83fdbd9baef459f94c5d941c5223fe8, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/098342455a254f3ca70912382b7c1327, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/18b94c79479b4b92999fc713797b9295, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/f69be982e0634444b36a32d4e8f9c765, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/75d24ed24e4d4752a34202eb09f7e2e4, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/b25e12a431b442e1a6a83642589c6ed4] to archive 2024-11-26T10:35:48,854 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-26T10:35:48,856 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/61c94bfcc0994e0180f22a89a7a1a5f2 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/61c94bfcc0994e0180f22a89a7a1a5f2 2024-11-26T10:35:48,857 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/b886fffbb2794f6baaf75615fb626473 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/b886fffbb2794f6baaf75615fb626473 2024-11-26T10:35:48,859 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/b51a2a1baa224d3888e3993faefc3290 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/b51a2a1baa224d3888e3993faefc3290 2024-11-26T10:35:48,860 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/f47a26c67f6b4107af4d5b33a334abd8 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/f47a26c67f6b4107af4d5b33a334abd8 2024-11-26T10:35:48,861 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/0c9d0deaed6b4d869bb93ab55a299b7d to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/0c9d0deaed6b4d869bb93ab55a299b7d 2024-11-26T10:35:48,863 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/b98fbb3930b748099e5c17cd3b77e1d9 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/b98fbb3930b748099e5c17cd3b77e1d9 2024-11-26T10:35:48,864 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/af427ebca65b463297d991eaa2ccfbeb to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/af427ebca65b463297d991eaa2ccfbeb 2024-11-26T10:35:48,866 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/e34137eabcf347dabfa923e980585b3b to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/e34137eabcf347dabfa923e980585b3b 2024-11-26T10:35:48,867 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/021d6239126c411ca61d53062aa65a76 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/021d6239126c411ca61d53062aa65a76 2024-11-26T10:35:48,869 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/08323784fc5c428fab316a6ca87793a9 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/08323784fc5c428fab316a6ca87793a9 2024-11-26T10:35:48,870 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/2fa1c1968c154a9d8c2a606345caf5c0 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/2fa1c1968c154a9d8c2a606345caf5c0 2024-11-26T10:35:48,872 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/2cbf02fd7c0b4f8ab2cefc735661a457 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/2cbf02fd7c0b4f8ab2cefc735661a457 2024-11-26T10:35:48,874 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/6e4fb45ae6f34987af15c413d2ae9cb9 to 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/6e4fb45ae6f34987af15c413d2ae9cb9 2024-11-26T10:35:48,875 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/972c9ca636f241fe96d5aa66f1ddec35 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/972c9ca636f241fe96d5aa66f1ddec35 2024-11-26T10:35:48,877 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/036b5bf2520145b68dfb0f227b6a10b6 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/036b5bf2520145b68dfb0f227b6a10b6 2024-11-26T10:35:48,879 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/b956a72ef3aa4c0580d1568e97bca483 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/b956a72ef3aa4c0580d1568e97bca483 2024-11-26T10:35:48,881 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/a83fdbd9baef459f94c5d941c5223fe8 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/a83fdbd9baef459f94c5d941c5223fe8 2024-11-26T10:35:48,882 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/098342455a254f3ca70912382b7c1327 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/098342455a254f3ca70912382b7c1327 2024-11-26T10:35:48,884 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/18b94c79479b4b92999fc713797b9295 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/18b94c79479b4b92999fc713797b9295 2024-11-26T10:35:48,885 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/f69be982e0634444b36a32d4e8f9c765 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/f69be982e0634444b36a32d4e8f9c765 2024-11-26T10:35:48,887 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/75d24ed24e4d4752a34202eb09f7e2e4 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/75d24ed24e4d4752a34202eb09f7e2e4 2024-11-26T10:35:48,888 DEBUG [StoreCloser-TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/b25e12a431b442e1a6a83642589c6ed4 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/b25e12a431b442e1a6a83642589c6ed4 2024-11-26T10:35:48,893 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/recovered.edits/337.seqid, newMaxSeqId=337, maxSeqId=1 2024-11-26T10:35:48,894 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28. 
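Editor's note: the records above show the StoreCloser relocating every compacted store file for families A, B and C from the region's data directory to the mirrored path under archive/, after which the region close completes by writing the recovered.edits/337.seqid marker. As a rough illustration of that directory layout (not part of the test run itself), the sketch below lists the archived files for one family with the plain Hadoop FileSystem API. The NameNode URI, test-data root, region name and family name are copied from the log records; the class name and everything else are illustrative assumptions.

```java
// Minimal sketch: inspect the data/ vs. archive/ layout produced by the store close above.
// Paths are taken from the log; this is not code from the test itself.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveLayoutCheck {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:44321"); // NameNode URI seen in the log

    String root = "/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1";
    String region = "default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28";

    try (FileSystem fs = FileSystem.get(conf)) {
      // Live store directory for family C and its mirrored location under archive/.
      Path live = new Path(root + "/data/" + region + "/C");
      Path archived = new Path(root + "/archive/data/" + region + "/C");

      for (FileStatus st : fs.listStatus(archived)) {
        System.out.println("archived store file: " + st.getPath().getName());
      }
      System.out.println("live store dir still present: " + fs.exists(live));
    }
  }
}
```

The point of the sketch is only that the archive path is a byte-for-byte mirror of the data path with an archive/ prefix, which is exactly what each "Archived from FileableStoreFile, ... to ..." record above records.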
2024-11-26T10:35:48,894 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegion(1635): Region close journal for 4d8f0b5b9c9359e0eccc71ea40315b28: 2024-11-26T10:35:48,896 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] handler.UnassignRegionHandler(170): Closed 4d8f0b5b9c9359e0eccc71ea40315b28 2024-11-26T10:35:48,897 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=142 updating hbase:meta row=4d8f0b5b9c9359e0eccc71ea40315b28, regionState=CLOSED 2024-11-26T10:35:48,900 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=143, resume processing ppid=142 2024-11-26T10:35:48,900 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, ppid=142, state=SUCCESS; CloseRegionProcedure 4d8f0b5b9c9359e0eccc71ea40315b28, server=ccf62758a0a5,45419,1732617185877 in 1.5400 sec 2024-11-26T10:35:48,901 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=142, resume processing ppid=141 2024-11-26T10:35:48,901 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, ppid=141, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=4d8f0b5b9c9359e0eccc71ea40315b28, UNASSIGN in 1.5450 sec 2024-11-26T10:35:48,903 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=141, resume processing ppid=140 2024-11-26T10:35:48,903 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, ppid=140, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.5490 sec 2024-11-26T10:35:48,904 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732617348904"}]},"ts":"1732617348904"} 2024-11-26T10:35:48,904 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-26T10:35:48,941 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-26T10:35:48,943 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.6480 sec 2024-11-26T10:35:49,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-26T10:35:49,407 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 140 completed 2024-11-26T10:35:49,409 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-26T10:35:49,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=144, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-26T10:35:49,412 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=144, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-26T10:35:49,414 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=144, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-26T10:35:49,414 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-26T10:35:49,417 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28 2024-11-26T10:35:49,422 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A, FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B, FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C, FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/recovered.edits] 2024-11-26T10:35:49,427 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/49463f2fdde6423b86f983ce8b562635 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/49463f2fdde6423b86f983ce8b562635 2024-11-26T10:35:49,429 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/6620213c9ac242c9aaf3e737b8676be1 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/6620213c9ac242c9aaf3e737b8676be1 2024-11-26T10:35:49,432 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/87f44dd3ec704ea1942655e22cf9378b to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/A/87f44dd3ec704ea1942655e22cf9378b 2024-11-26T10:35:49,437 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/1cd3f7009de04c5ea51e2268ed19a9db to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/1cd3f7009de04c5ea51e2268ed19a9db 2024-11-26T10:35:49,439 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/237238b0c20c4615975ef18b0b5e3f11 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/237238b0c20c4615975ef18b0b5e3f11 
2024-11-26T10:35:49,442 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/ba65937d49ef429686e65a8994ed2942 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/B/ba65937d49ef429686e65a8994ed2942 2024-11-26T10:35:49,447 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/1197a3cb426e4af4943e8ed8ca545fa0 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/1197a3cb426e4af4943e8ed8ca545fa0 2024-11-26T10:35:49,448 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/1e85145e3e6a4933824753f23c0d5f34 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/1e85145e3e6a4933824753f23c0d5f34 2024-11-26T10:35:49,449 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/ed07fed98d4e48e78cd894d4f79ffbbd to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/C/ed07fed98d4e48e78cd894d4f79ffbbd 2024-11-26T10:35:49,450 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/recovered.edits/337.seqid to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28/recovered.edits/337.seqid 2024-11-26T10:35:49,451 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/4d8f0b5b9c9359e0eccc71ea40315b28 2024-11-26T10:35:49,451 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-26T10:35:49,452 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=144, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-26T10:35:49,453 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-26T10:35:49,455 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 
2024-11-26T10:35:49,455 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=144, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-26T10:35:49,455 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-26T10:35:49,456 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732617349456"}]},"ts":"9223372036854775807"} 2024-11-26T10:35:49,457 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-26T10:35:49,457 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 4d8f0b5b9c9359e0eccc71ea40315b28, NAME => 'TestAcidGuarantees,,1732617321740.4d8f0b5b9c9359e0eccc71ea40315b28.', STARTKEY => '', ENDKEY => ''}] 2024-11-26T10:35:49,457 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-26T10:35:49,457 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732617349457"}]},"ts":"9223372036854775807"} 2024-11-26T10:35:49,458 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-26T10:35:49,501 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=144, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-26T10:35:49,503 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 92 msec 2024-11-26T10:35:49,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-26T10:35:49,516 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 144 completed 2024-11-26T10:35:49,530 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=239 (was 241), OpenFileDescriptor=450 (was 457), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=341 (was 324) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=5246 (was 5264) 2024-11-26T10:35:49,539 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=239, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=341, ProcessCount=11, AvailableMemoryMB=5246 2024-11-26T10:35:49,541 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
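Editor's note: taken together, the preceding records trace a full disable-and-delete cycle for the previous TestAcidGuarantees instance: DisableTableProcedure (pid=140) unassigns the region and marks the table DISABLED in hbase:meta, then DeleteTableProcedure (pid=144) archives the region directory, deletes the region and table rows from hbase:meta, and removes the descriptor. A minimal client-side sketch that would drive the same sequence through the public Admin API follows; only the table name comes from the log, the connection setup is an assumed default HBaseConfiguration, and the class name is illustrative.

```java
// Minimal sketch of the client calls behind the DisableTableProcedure / DeleteTableProcedure
// records above. Connection setup is an assumption; the table name is from the log.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTestTable {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      if (admin.tableExists(table)) {
        if (admin.isTableEnabled(table)) {
          admin.disableTable(table); // submitted to the master as a DisableTableProcedure
        }
        admin.deleteTable(table);    // submitted as a DeleteTableProcedure, which archives region dirs
      }
    }
  }
}
```

The tableExists/isTableEnabled guards are only there to make the sketch safe to re-run; the log shows the test harness issuing the disable and delete directly once the scan-atomicity test finishes.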
2024-11-26T10:35:49,541 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-26T10:35:49,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=145, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-26T10:35:49,544 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=145, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-26T10:35:49,544 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:49,544 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 145 2024-11-26T10:35:49,544 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=145, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-26T10:35:49,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=145 2024-11-26T10:35:49,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742322_1498 (size=963) 2024-11-26T10:35:49,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=145 2024-11-26T10:35:49,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=145 2024-11-26T10:35:49,956 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1 2024-11-26T10:35:49,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742323_1499 (size=53) 2024-11-26T10:35:50,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=145 2024-11-26T10:35:50,367 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:35:50,368 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing d75570331a075e2a47f6b93a7b93d8ef, disabling compactions & flushes 2024-11-26T10:35:50,368 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:50,368 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:50,368 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. after waiting 0 ms 2024-11-26T10:35:50,368 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:50,368 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 
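Editor's note: the create request above declares the table-level metadata key 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' plus three single-version families A, B and C, which is why the StoreOpener further down instantiates a CompactingMemStore with an ADAPTIVE compactor for each store. A hedged sketch of building an equivalent descriptor through the client API is given below; the table name, family names, VERSIONS setting and metadata key are taken from the logged descriptor, the remaining family attributes are left at their defaults, and the class name and connection setup are illustrative assumptions rather than the test's own code.

```java
// Minimal sketch of a descriptor equivalent to the one logged above
// (ADAPTIVE compacting memstore, families A/B/C with a single version).
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateAdaptiveTable {
  public static void main(String[] args) throws Exception {
    TableDescriptorBuilder tdb = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        // table-level metadata seen in the logged create request
        .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");

    for (String family : new String[] { "A", "B", "C" }) {
      tdb.setColumnFamily(ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes(family))
          .setMaxVersions(1) // VERSIONS => '1' in the logged descriptor
          .build());
    }

    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.createTable(tdb.build()); // stored on the master as a CreateTableProcedure
    }
  }
}
```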
2024-11-26T10:35:50,368 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for d75570331a075e2a47f6b93a7b93d8ef: 2024-11-26T10:35:50,370 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=145, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-26T10:35:50,370 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732617350370"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732617350370"}]},"ts":"1732617350370"} 2024-11-26T10:35:50,372 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-26T10:35:50,374 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=145, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-26T10:35:50,374 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732617350374"}]},"ts":"1732617350374"} 2024-11-26T10:35:50,376 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-26T10:35:50,426 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=146, ppid=145, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d75570331a075e2a47f6b93a7b93d8ef, ASSIGN}] 2024-11-26T10:35:50,428 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=146, ppid=145, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d75570331a075e2a47f6b93a7b93d8ef, ASSIGN 2024-11-26T10:35:50,429 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=146, ppid=145, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=d75570331a075e2a47f6b93a7b93d8ef, ASSIGN; state=OFFLINE, location=ccf62758a0a5,45419,1732617185877; forceNewPlan=false, retain=false 2024-11-26T10:35:50,580 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=146 updating hbase:meta row=d75570331a075e2a47f6b93a7b93d8ef, regionState=OPENING, regionLocation=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:50,581 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=147, ppid=146, state=RUNNABLE; OpenRegionProcedure d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877}] 2024-11-26T10:35:50,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=145 2024-11-26T10:35:50,734 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:50,740 INFO [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 
2024-11-26T10:35:50,740 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7285): Opening region: {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} 2024-11-26T10:35:50,741 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:35:50,741 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:35:50,741 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7327): checking encryption for d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:35:50,742 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7330): checking classloading for d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:35:50,745 INFO [StoreOpener-d75570331a075e2a47f6b93a7b93d8ef-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:35:50,746 INFO [StoreOpener-d75570331a075e2a47f6b93a7b93d8ef-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-26T10:35:50,746 INFO [StoreOpener-d75570331a075e2a47f6b93a7b93d8ef-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d75570331a075e2a47f6b93a7b93d8ef columnFamilyName A 2024-11-26T10:35:50,746 DEBUG [StoreOpener-d75570331a075e2a47f6b93a7b93d8ef-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:50,747 INFO [StoreOpener-d75570331a075e2a47f6b93a7b93d8ef-1 {}] regionserver.HStore(327): Store=d75570331a075e2a47f6b93a7b93d8ef/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:35:50,747 INFO [StoreOpener-d75570331a075e2a47f6b93a7b93d8ef-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:35:50,748 INFO [StoreOpener-d75570331a075e2a47f6b93a7b93d8ef-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-26T10:35:50,748 INFO [StoreOpener-d75570331a075e2a47f6b93a7b93d8ef-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d75570331a075e2a47f6b93a7b93d8ef columnFamilyName B 2024-11-26T10:35:50,748 DEBUG [StoreOpener-d75570331a075e2a47f6b93a7b93d8ef-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:50,749 INFO [StoreOpener-d75570331a075e2a47f6b93a7b93d8ef-1 {}] regionserver.HStore(327): Store=d75570331a075e2a47f6b93a7b93d8ef/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:35:50,749 INFO [StoreOpener-d75570331a075e2a47f6b93a7b93d8ef-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:35:50,749 INFO [StoreOpener-d75570331a075e2a47f6b93a7b93d8ef-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-26T10:35:50,750 INFO [StoreOpener-d75570331a075e2a47f6b93a7b93d8ef-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d75570331a075e2a47f6b93a7b93d8ef columnFamilyName C 2024-11-26T10:35:50,750 DEBUG [StoreOpener-d75570331a075e2a47f6b93a7b93d8ef-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:50,750 INFO [StoreOpener-d75570331a075e2a47f6b93a7b93d8ef-1 {}] regionserver.HStore(327): Store=d75570331a075e2a47f6b93a7b93d8ef/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:35:50,750 INFO [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:50,751 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:35:50,751 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:35:50,752 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-26T10:35:50,753 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1085): writing seq id for d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:35:50,754 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-26T10:35:50,755 INFO [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1102): Opened d75570331a075e2a47f6b93a7b93d8ef; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69493198, jitterRate=0.03552934527397156}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-26T10:35:50,755 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1001): Region open journal for d75570331a075e2a47f6b93a7b93d8ef: 2024-11-26T10:35:50,756 INFO [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef., pid=147, masterSystemTime=1732617350733 2024-11-26T10:35:50,757 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:50,757 INFO [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 
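[Editor's note] The store-opener entries above show each family backed by a CompactingMemStore with the ADAPTIVE in-memory compaction policy, inherited here from the table-level 'hbase.hregion.compacting.memstore.type' attribute. As a hedged illustration outside this test, the same policy can also be requested per column family through the 2.x builder API; the helper below is an assumption about how that might look, not code from this run.

    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AdaptiveFamilyExample {
      // Builds a family descriptor whose memstore uses ADAPTIVE in-memory compaction,
      // mirroring what the store opener reports for families A/B/C above.
      static ColumnFamilyDescriptor adaptiveFamily(String name) {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
            .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
            .setMaxVersions(1)
            .build();
      }
    }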
2024-11-26T10:35:50,757 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=146 updating hbase:meta row=d75570331a075e2a47f6b93a7b93d8ef, regionState=OPEN, openSeqNum=2, regionLocation=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:50,759 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=147, resume processing ppid=146 2024-11-26T10:35:50,759 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, ppid=146, state=SUCCESS; OpenRegionProcedure d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 in 177 msec 2024-11-26T10:35:50,760 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=146, resume processing ppid=145 2024-11-26T10:35:50,760 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, ppid=145, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=d75570331a075e2a47f6b93a7b93d8ef, ASSIGN in 333 msec 2024-11-26T10:35:50,761 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=145, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-26T10:35:50,761 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732617350761"}]},"ts":"1732617350761"} 2024-11-26T10:35:50,761 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-26T10:35:50,768 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=145, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-26T10:35:50,769 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2260 sec 2024-11-26T10:35:51,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=145 2024-11-26T10:35:51,654 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 145 completed 2024-11-26T10:35:51,657 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6d9954b7 to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3fb684eb 2024-11-26T10:35:51,722 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@537a66f8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:35:51,726 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:35:51,728 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34438, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:35:51,730 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-26T10:35:51,732 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53424, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-26T10:35:51,734 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-26T10:35:51,734 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-26T10:35:51,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=148, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-26T10:35:51,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742324_1500 (size=999) 2024-11-26T10:35:52,151 DEBUG [PEWorker-1 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-11-26T10:35:52,152 INFO [PEWorker-1 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-11-26T10:35:52,156 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=149, ppid=148, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-26T10:35:52,159 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=150, ppid=149, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d75570331a075e2a47f6b93a7b93d8ef, REOPEN/MOVE}] 2024-11-26T10:35:52,161 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=150, ppid=149, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d75570331a075e2a47f6b93a7b93d8ef, REOPEN/MOVE 2024-11-26T10:35:52,162 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=150 updating hbase:meta row=d75570331a075e2a47f6b93a7b93d8ef, regionState=CLOSING, regionLocation=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:52,164 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-26T10:35:52,164 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=151, ppid=150, state=RUNNABLE; CloseRegionProcedure d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877}] 2024-11-26T10:35:52,317 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:52,318 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(124): Close d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:35:52,318 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-26T10:35:52,318 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1681): Closing d75570331a075e2a47f6b93a7b93d8ef, disabling compactions & flushes 2024-11-26T10:35:52,318 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:52,318 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:52,318 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. after waiting 0 ms 2024-11-26T10:35:52,318 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 
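[Editor's note] The modify request logged just above turns family 'A' into a MOB family (IS_MOB => 'true', MOB_THRESHOLD => '4'), which is why the region is closed and reopened here by the ReopenTableRegionsProcedure. A minimal sketch, assuming the standard HBase 2.x Admin API rather than the test's own helpers, of issuing an equivalent alter:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class EnableMobOnFamilyA {
      // Rewrites family 'A' of TestAcidGuarantees as a MOB family with a 4-byte threshold,
      // matching the modify-table request in the log; cells above the threshold go to MOB files.
      static void enableMob(Admin admin) throws java.io.IOException {
        TableName name = TableName.valueOf("TestAcidGuarantees");
        TableDescriptor current = admin.getDescriptor(name);
        TableDescriptor updated = TableDescriptorBuilder.newBuilder(current)
            .modifyColumnFamily(
                ColumnFamilyDescriptorBuilder
                    .newBuilder(current.getColumnFamily(Bytes.toBytes("A")))
                    .setMobEnabled(true)   // IS_MOB => 'true'
                    .setMobThreshold(4L)   // MOB_THRESHOLD => '4'
                    .build())
            .build();
        admin.modifyTable(updated);        // triggers the ModifyTableProcedure and reopen seen here
      }
    }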
2024-11-26T10:35:52,326 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-26T10:35:52,327 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:52,327 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1635): Region close journal for d75570331a075e2a47f6b93a7b93d8ef: 2024-11-26T10:35:52,327 WARN [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegionServer(3786): Not adding moved region record: d75570331a075e2a47f6b93a7b93d8ef to self. 2024-11-26T10:35:52,328 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(170): Closed d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:35:52,329 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=150 updating hbase:meta row=d75570331a075e2a47f6b93a7b93d8ef, regionState=CLOSED 2024-11-26T10:35:52,331 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=151, resume processing ppid=150 2024-11-26T10:35:52,331 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, ppid=150, state=SUCCESS; CloseRegionProcedure d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 in 165 msec 2024-11-26T10:35:52,331 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=150, ppid=149, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=d75570331a075e2a47f6b93a7b93d8ef, REOPEN/MOVE; state=CLOSED, location=ccf62758a0a5,45419,1732617185877; forceNewPlan=false, retain=true 2024-11-26T10:35:52,482 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=150 updating hbase:meta row=d75570331a075e2a47f6b93a7b93d8ef, regionState=OPENING, regionLocation=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:52,484 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=152, ppid=150, state=RUNNABLE; OpenRegionProcedure d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877}] 2024-11-26T10:35:52,638 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:52,644 INFO [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 
2024-11-26T10:35:52,644 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(7285): Opening region: {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} 2024-11-26T10:35:52,645 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:35:52,645 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-26T10:35:52,646 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(7327): checking encryption for d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:35:52,646 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(7330): checking classloading for d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:35:52,648 INFO [StoreOpener-d75570331a075e2a47f6b93a7b93d8ef-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:35:52,649 INFO [StoreOpener-d75570331a075e2a47f6b93a7b93d8ef-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-26T10:35:52,650 INFO [StoreOpener-d75570331a075e2a47f6b93a7b93d8ef-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d75570331a075e2a47f6b93a7b93d8ef columnFamilyName A 2024-11-26T10:35:52,651 DEBUG [StoreOpener-d75570331a075e2a47f6b93a7b93d8ef-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:52,652 INFO [StoreOpener-d75570331a075e2a47f6b93a7b93d8ef-1 {}] regionserver.HStore(327): Store=d75570331a075e2a47f6b93a7b93d8ef/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:35:52,653 INFO [StoreOpener-d75570331a075e2a47f6b93a7b93d8ef-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:35:52,653 INFO [StoreOpener-d75570331a075e2a47f6b93a7b93d8ef-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-26T10:35:52,654 INFO [StoreOpener-d75570331a075e2a47f6b93a7b93d8ef-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d75570331a075e2a47f6b93a7b93d8ef columnFamilyName B 2024-11-26T10:35:52,654 DEBUG [StoreOpener-d75570331a075e2a47f6b93a7b93d8ef-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:52,654 INFO [StoreOpener-d75570331a075e2a47f6b93a7b93d8ef-1 {}] regionserver.HStore(327): Store=d75570331a075e2a47f6b93a7b93d8ef/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:35:52,654 INFO [StoreOpener-d75570331a075e2a47f6b93a7b93d8ef-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:35:52,655 INFO [StoreOpener-d75570331a075e2a47f6b93a7b93d8ef-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-26T10:35:52,655 INFO [StoreOpener-d75570331a075e2a47f6b93a7b93d8ef-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d75570331a075e2a47f6b93a7b93d8ef columnFamilyName C 2024-11-26T10:35:52,655 DEBUG [StoreOpener-d75570331a075e2a47f6b93a7b93d8ef-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:52,656 INFO [StoreOpener-d75570331a075e2a47f6b93a7b93d8ef-1 {}] regionserver.HStore(327): Store=d75570331a075e2a47f6b93a7b93d8ef/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-26T10:35:52,656 INFO [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:52,657 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:35:52,658 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:35:52,660 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-26T10:35:52,662 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(1085): writing seq id for d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:35:52,663 INFO [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(1102): Opened d75570331a075e2a47f6b93a7b93d8ef; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64158001, jitterRate=-0.043971285223960876}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-26T10:35:52,664 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(1001): Region open journal for d75570331a075e2a47f6b93a7b93d8ef: 2024-11-26T10:35:52,664 INFO [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef., pid=152, masterSystemTime=1732617352638 2024-11-26T10:35:52,666 DEBUG [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:52,666 INFO [RS_OPEN_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 
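[Editor's note] After this reopen, the entries further down show an explicit flush request (FlushTableProcedure, pid=153) followed by RegionTooBusyException warnings as client writes push the region over its 512 KB blocking memstore limit, presumably the deliberately tiny 128 KB MEMSTORE_FLUSHSIZE flagged by the TableDescriptorChecker warning multiplied by the default block multiplier. A rough, assumed sketch of a client that requests such a flush and backs off on that exception is below; depending on client retry settings, the exception may instead surface wrapped by the client's own retry machinery.

    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FlushAndRetryExample {
      // Issues the same kind of flush request seen at HMaster$22 below, then writes a row,
      // backing off briefly whenever the region reports it is over its blocking memstore limit.
      static void flushThenPut(Admin admin, Table table) throws Exception {
        admin.flush(TableName.valueOf("TestAcidGuarantees"));  // queues a flush of all stores
        Put put = new Put(Bytes.toBytes("test_row_0"))
            .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
        for (int attempt = 0; attempt < 5; attempt++) {
          try {
            table.put(put);
            return;
          } catch (RegionTooBusyException overLimit) {
            // Memstore above the blocking limit; wait for the flush to drain and retry.
            Thread.sleep(200L * (attempt + 1));
          }
        }
        throw new java.io.IOException("region stayed too busy after retries");
      }
    }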
2024-11-26T10:35:52,666 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=150 updating hbase:meta row=d75570331a075e2a47f6b93a7b93d8ef, regionState=OPEN, openSeqNum=5, regionLocation=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:52,669 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=152, resume processing ppid=150 2024-11-26T10:35:52,669 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, ppid=150, state=SUCCESS; OpenRegionProcedure d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 in 183 msec 2024-11-26T10:35:52,670 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=150, resume processing ppid=149 2024-11-26T10:35:52,670 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, ppid=149, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=d75570331a075e2a47f6b93a7b93d8ef, REOPEN/MOVE in 510 msec 2024-11-26T10:35:52,673 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=149, resume processing ppid=148 2024-11-26T10:35:52,673 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, ppid=148, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 515 msec 2024-11-26T10:35:52,676 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 939 msec 2024-11-26T10:35:52,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-26T10:35:52,679 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0d5efb7a to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@644b7e6 2024-11-26T10:35:52,726 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6094c70, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:35:52,727 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7fc332d8 to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5c9b5141 2024-11-26T10:35:52,736 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@103dfc6e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:35:52,737 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x17327621 to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@11a52cdf 2024-11-26T10:35:52,750 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e047c09, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:35:52,751 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1584f18a 
to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2d7fe431 2024-11-26T10:35:52,821 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@60d631a3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:35:52,822 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5b914bf4 to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@91d72db 2024-11-26T10:35:52,921 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58971172, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:35:52,923 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5d836f78 to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3d7fe93b 2024-11-26T10:35:52,943 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7846cb78, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:35:52,944 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x53305d9b to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@11c440f7 2024-11-26T10:35:52,951 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f1754bc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:35:52,952 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6bb6288a to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@58460ef3 2024-11-26T10:35:52,960 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d9113f3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:35:52,960 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x06556601 to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6e8cd1ae 2024-11-26T10:35:52,968 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5bb75907, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:35:52,969 DEBUG [Time-limited test 
{}] zookeeper.ReadOnlyZKClient(149): Connect 0x458a85fd to 127.0.0.1:61934 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4d832d43 2024-11-26T10:35:52,976 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c1d3a95, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-26T10:35:52,979 DEBUG [hconnection-0x515c1bab-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:35:52,980 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-26T10:35:52,980 DEBUG [hconnection-0x7d54c60-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:35:52,980 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34450, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:35:52,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=153, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=153, table=TestAcidGuarantees 2024-11-26T10:35:52,981 DEBUG [hconnection-0x416f9560-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:35:52,981 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34466, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:35:52,982 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=153, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=153, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-26T10:35:52,982 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34474, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:35:52,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-11-26T10:35:52,982 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=153, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=153, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-26T10:35:52,983 DEBUG [hconnection-0x3b8481e3-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:35:52,983 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=154, ppid=153, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-26T10:35:52,984 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34490, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:35:52,991 DEBUG [hconnection-0x1bafd3f2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE 
authentication for service=ClientService, sasl=false 2024-11-26T10:35:52,992 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34504, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:35:52,992 DEBUG [hconnection-0xd0fa45b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:35:52,992 DEBUG [hconnection-0x6ae177cd-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:35:52,992 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34506, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:35:52,993 DEBUG [hconnection-0x176b3278-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:35:52,993 DEBUG [hconnection-0x19b53e60-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:35:52,993 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34508, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:35:52,993 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34528, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:35:52,994 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34534, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:35:52,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:35:52,996 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d75570331a075e2a47f6b93a7b93d8ef 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-26T10:35:52,996 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d75570331a075e2a47f6b93a7b93d8ef, store=A 2024-11-26T10:35:52,996 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:52,996 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d75570331a075e2a47f6b93a7b93d8ef, store=B 2024-11-26T10:35:52,996 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:52,996 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d75570331a075e2a47f6b93a7b93d8ef, store=C 2024-11-26T10:35:52,996 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:53,007 DEBUG [hconnection-0x128e2803-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-26T10:35:53,008 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34544, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-26T10:35:53,008 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size 
limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:53,008 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:53,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34504 deadline: 1732617413005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:53,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34490 deadline: 1732617413006, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:53,010 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:53,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34506 deadline: 1732617413007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:53,010 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:53,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34508 deadline: 1732617413009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:53,011 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:53,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34534 deadline: 1732617413011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:53,029 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126764168e6184e493dbca07e7b21eec5f7_d75570331a075e2a47f6b93a7b93d8ef is 50, key is test_row_0/A:col10/1732617352995/Put/seqid=0 2024-11-26T10:35:53,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742325_1501 (size=12154) 2024-11-26T10:35:53,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-11-26T10:35:53,110 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:53,110 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:53,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34490 deadline: 1732617413109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:53,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34504 deadline: 1732617413109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:53,111 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:53,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34506 deadline: 1732617413110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:53,112 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:53,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34508 deadline: 1732617413111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:53,113 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:53,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34534 deadline: 1732617413112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:53,134 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:53,134 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-11-26T10:35:53,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:53,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. as already flushing 2024-11-26T10:35:53,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:53,135 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:53,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:53,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:53,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-11-26T10:35:53,286 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:53,287 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-11-26T10:35:53,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:53,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. as already flushing 2024-11-26T10:35:53,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:53,287 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
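The handlers above keep rejecting Mutate calls with RegionTooBusyException because the region's memstore is over its 512 K blocking limit while a flush for d75570331a075e2a47f6b93a7b93d8ef is still in progress. For a writer this normally just means backing off and retrying the put once the flush frees memstore space; a minimal illustrative sketch against the public client API follows (table, row and column names are taken from the log entries; the retry policy is an assumption, and the stock client may already perform equivalent retries internally before the exception surfaces to the caller).

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionBackoff {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
      long backoffMs = 100;
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);             // succeeds once the flush frees memstore space
          break;
        } catch (IOException e) {     // RegionTooBusyException, possibly wrapped by the client
          if (attempt >= 10) throw e; // give up after a bounded number of attempts
          Thread.sleep(backoffMs);    // let the flush make progress before retrying
          backoffMs = Math.min(backoffMs * 2, 5_000);
        }
      }
    }
  }
}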
2024-11-26T10:35:53,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:53,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:53,312 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:53,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34490 deadline: 1732617413311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:53,313 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:53,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34504 deadline: 1732617413312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:53,314 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:53,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34506 deadline: 1732617413313, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:53,315 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:53,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34508 deadline: 1732617413313, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:53,315 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:53,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34534 deadline: 1732617413313, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:53,433 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:53,436 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126764168e6184e493dbca07e7b21eec5f7_d75570331a075e2a47f6b93a7b93d8ef to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126764168e6184e493dbca07e7b21eec5f7_d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:35:53,436 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/dfa477d503184025a31b8ee03366e902, store: [table=TestAcidGuarantees family=A region=d75570331a075e2a47f6b93a7b93d8ef] 2024-11-26T10:35:53,437 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/dfa477d503184025a31b8ee03366e902 is 175, key is test_row_0/A:col10/1732617352995/Put/seqid=0 2024-11-26T10:35:53,438 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:53,438 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-11-26T10:35:53,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:53,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 
as already flushing 2024-11-26T10:35:53,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:53,439 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:53,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:53,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:53,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742326_1502 (size=30955) 2024-11-26T10:35:53,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-11-26T10:35:53,590 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:53,590 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-11-26T10:35:53,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:53,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. as already flushing 2024-11-26T10:35:53,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 
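The 512 K figure in the "Over memstore limit" messages is the region's blocking threshold, i.e. the configured memstore flush size multiplied by the block multiplier; a test that wants to provoke this throttling shrinks those settings. A sketch of such a configuration is below (the property names are the standard HBase keys; the concrete values are assumptions chosen only so their product matches the 512 K seen in the log, the real test configuration is not shown here).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Flush each memstore at 128 K and block writers at 4x that (= 512 K, as in the log).
    // These exact values are assumed for illustration.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
    System.out.println("memstore blocking limit = " + blockingLimit + " bytes");
  }
}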
2024-11-26T10:35:53,591 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:53,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:53,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:53,617 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:53,617 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:53,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34490 deadline: 1732617413615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:53,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34504 deadline: 1732617413615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:53,617 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:53,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34506 deadline: 1732617413616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:53,618 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:53,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34534 deadline: 1732617413616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:53,619 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:53,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34508 deadline: 1732617413617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:53,742 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:53,743 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-11-26T10:35:53,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:53,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. as already flushing 2024-11-26T10:35:53,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:53,743 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:53,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:53,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:53,840 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=19, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/dfa477d503184025a31b8ee03366e902 2024-11-26T10:35:53,862 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/43a1edd4255e467083ce7f78917a5b8f is 50, key is test_row_0/B:col10/1732617352995/Put/seqid=0 2024-11-26T10:35:53,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742327_1503 (size=12001) 2024-11-26T10:35:53,894 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:53,895 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-11-26T10:35:53,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:53,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. as already flushing 2024-11-26T10:35:53,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:53,895 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:53,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:53,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:54,046 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:54,047 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-11-26T10:35:54,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:54,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. as already flushing 2024-11-26T10:35:54,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:54,047 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:54,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:54,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:54,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-11-26T10:35:54,118 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:54,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34504 deadline: 1732617414118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:54,118 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:54,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34506 deadline: 1732617414118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:54,120 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:54,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34534 deadline: 1732617414119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:54,120 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:54,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34490 deadline: 1732617414120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:54,124 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:54,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34508 deadline: 1732617414124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:54,199 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:54,199 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-11-26T10:35:54,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:54,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. as already flushing 2024-11-26T10:35:54,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:54,199 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:54,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:54,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:54,285 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=19 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/43a1edd4255e467083ce7f78917a5b8f 2024-11-26T10:35:54,306 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/0a84e899b3ec44bca146059214289653 is 50, key is test_row_0/C:col10/1732617352995/Put/seqid=0 2024-11-26T10:35:54,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742328_1504 (size=12001) 2024-11-26T10:35:54,351 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:54,351 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-11-26T10:35:54,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:54,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 
as already flushing 2024-11-26T10:35:54,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:54,352 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:54,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:54,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:54,418 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-26T10:35:54,503 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:54,503 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-11-26T10:35:54,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:54,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. as already flushing 2024-11-26T10:35:54,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:54,504 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:54,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:54,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:54,655 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:54,656 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-11-26T10:35:54,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:54,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. as already flushing 2024-11-26T10:35:54,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:54,656 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:54,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:54,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:54,720 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=19 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/0a84e899b3ec44bca146059214289653 2024-11-26T10:35:54,723 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/dfa477d503184025a31b8ee03366e902 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/dfa477d503184025a31b8ee03366e902 2024-11-26T10:35:54,725 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/dfa477d503184025a31b8ee03366e902, entries=150, sequenceid=19, filesize=30.2 K 2024-11-26T10:35:54,727 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/43a1edd4255e467083ce7f78917a5b8f as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/43a1edd4255e467083ce7f78917a5b8f 2024-11-26T10:35:54,729 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/43a1edd4255e467083ce7f78917a5b8f, entries=150, sequenceid=19, 
filesize=11.7 K 2024-11-26T10:35:54,729 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/0a84e899b3ec44bca146059214289653 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/0a84e899b3ec44bca146059214289653 2024-11-26T10:35:54,732 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/0a84e899b3ec44bca146059214289653, entries=150, sequenceid=19, filesize=11.7 K 2024-11-26T10:35:54,732 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for d75570331a075e2a47f6b93a7b93d8ef in 1736ms, sequenceid=19, compaction requested=false 2024-11-26T10:35:54,732 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-26T10:35:54,733 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d75570331a075e2a47f6b93a7b93d8ef: 2024-11-26T10:35:54,808 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:54,808 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-11-26T10:35:54,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 
2024-11-26T10:35:54,808 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2837): Flushing d75570331a075e2a47f6b93a7b93d8ef 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-26T10:35:54,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d75570331a075e2a47f6b93a7b93d8ef, store=A 2024-11-26T10:35:54,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:54,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d75570331a075e2a47f6b93a7b93d8ef, store=B 2024-11-26T10:35:54,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:54,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d75570331a075e2a47f6b93a7b93d8ef, store=C 2024-11-26T10:35:54,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:54,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112687c849b47eb3448aa75bda41b4c35418_d75570331a075e2a47f6b93a7b93d8ef is 50, key is test_row_0/A:col10/1732617353006/Put/seqid=0 2024-11-26T10:35:54,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742329_1505 (size=12154) 2024-11-26T10:35:55,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-11-26T10:35:55,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:35:55,123 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. as already flushing 2024-11-26T10:35:55,131 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:55,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34508 deadline: 1732617415128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:55,132 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:55,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34506 deadline: 1732617415129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:55,132 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:55,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34534 deadline: 1732617415130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:55,132 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:55,132 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:55,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34490 deadline: 1732617415131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:55,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34504 deadline: 1732617415131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:55,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:55,227 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112687c849b47eb3448aa75bda41b4c35418_d75570331a075e2a47f6b93a7b93d8ef to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112687c849b47eb3448aa75bda41b4c35418_d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:35:55,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/bdb7f4a78d9146e5a7fb62734bc585b8, store: [table=TestAcidGuarantees family=A region=d75570331a075e2a47f6b93a7b93d8ef] 2024-11-26T10:35:55,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/bdb7f4a78d9146e5a7fb62734bc585b8 is 175, key is 
test_row_0/A:col10/1732617353006/Put/seqid=0 2024-11-26T10:35:55,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742330_1506 (size=30955) 2024-11-26T10:35:55,233 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:55,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34534 deadline: 1732617415233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:55,235 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:55,235 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:55,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34490 deadline: 1732617415233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:55,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34506 deadline: 1732617415233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:55,235 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:55,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34504 deadline: 1732617415233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:55,435 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:55,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34534 deadline: 1732617415434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:55,437 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:55,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34506 deadline: 1732617415436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:55,438 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:55,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34490 deadline: 1732617415437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:55,438 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:55,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34504 deadline: 1732617415437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:55,632 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/bdb7f4a78d9146e5a7fb62734bc585b8 2024-11-26T10:35:55,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/9302b6982bec4e5e89443c8f6e934f4d is 50, key is test_row_0/B:col10/1732617353006/Put/seqid=0 2024-11-26T10:35:55,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742331_1507 (size=12001) 2024-11-26T10:35:55,740 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:55,740 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:55,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34490 deadline: 1732617415738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:55,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34534 deadline: 1732617415738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:55,740 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:55,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34506 deadline: 1732617415739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:55,740 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:55,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34504 deadline: 1732617415740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:56,040 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/9302b6982bec4e5e89443c8f6e934f4d 2024-11-26T10:35:56,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/18e7fe6407cc4101a1b0de2426535924 is 50, key is test_row_0/C:col10/1732617353006/Put/seqid=0 2024-11-26T10:35:56,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742332_1508 (size=12001) 2024-11-26T10:35:56,244 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:56,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34506 deadline: 1732617416243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:56,244 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:56,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34490 deadline: 1732617416243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:56,246 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:56,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34534 deadline: 1732617416245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:56,246 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:56,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34504 deadline: 1732617416245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:56,488 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/18e7fe6407cc4101a1b0de2426535924 2024-11-26T10:35:56,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/bdb7f4a78d9146e5a7fb62734bc585b8 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/bdb7f4a78d9146e5a7fb62734bc585b8 2024-11-26T10:35:56,494 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/bdb7f4a78d9146e5a7fb62734bc585b8, entries=150, sequenceid=41, filesize=30.2 K 2024-11-26T10:35:56,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/9302b6982bec4e5e89443c8f6e934f4d as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/9302b6982bec4e5e89443c8f6e934f4d 2024-11-26T10:35:56,496 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/9302b6982bec4e5e89443c8f6e934f4d, entries=150, sequenceid=41, filesize=11.7 K 2024-11-26T10:35:56,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/18e7fe6407cc4101a1b0de2426535924 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/18e7fe6407cc4101a1b0de2426535924 2024-11-26T10:35:56,499 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/18e7fe6407cc4101a1b0de2426535924, entries=150, sequenceid=41, filesize=11.7 K 2024-11-26T10:35:56,500 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for d75570331a075e2a47f6b93a7b93d8ef in 1692ms, sequenceid=41, compaction requested=false 2024-11-26T10:35:56,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2538): Flush status journal for d75570331a075e2a47f6b93a7b93d8ef: 2024-11-26T10:35:56,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:56,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=154 2024-11-26T10:35:56,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4106): Remote procedure done, pid=154 2024-11-26T10:35:56,502 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=154, resume processing ppid=153 2024-11-26T10:35:56,502 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, ppid=153, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.5180 sec 2024-11-26T10:35:56,502 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=153, table=TestAcidGuarantees in 3.5220 sec 2024-11-26T10:35:57,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-11-26T10:35:57,087 INFO [Thread-2254 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 153 completed 2024-11-26T10:35:57,087 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-26T10:35:57,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=155, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees 2024-11-26T10:35:57,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-26T10:35:57,119 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=155, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-26T10:35:57,119 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=155, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-26T10:35:57,120 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-26T10:35:57,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:35:57,137 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d75570331a075e2a47f6b93a7b93d8ef 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-26T10:35:57,137 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d75570331a075e2a47f6b93a7b93d8ef, store=A 2024-11-26T10:35:57,137 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:57,137 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d75570331a075e2a47f6b93a7b93d8ef, store=B 2024-11-26T10:35:57,137 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:57,137 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d75570331a075e2a47f6b93a7b93d8ef, store=C 2024-11-26T10:35:57,137 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:57,142 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411267b2cbc915a244bdaa9ea923eed02daa7_d75570331a075e2a47f6b93a7b93d8ef is 50, key is test_row_0/A:col10/1732617357136/Put/seqid=0 2024-11-26T10:35:57,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742333_1509 (size=14594) 2024-11-26T10:35:57,181 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:57,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34508 deadline: 1732617417179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:57,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-26T10:35:57,248 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:57,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34504 deadline: 1732617417248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:57,250 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:57,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34506 deadline: 1732617417250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:57,253 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:57,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34490 deadline: 1732617417252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:57,257 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:57,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34534 deadline: 1732617417256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:57,270 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:57,270 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-11-26T10:35:57,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:57,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. as already flushing 2024-11-26T10:35:57,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:57,270 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:57,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:57,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:57,283 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:57,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34508 deadline: 1732617417282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:57,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-26T10:35:57,422 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:57,422 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-11-26T10:35:57,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:57,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. as already flushing 2024-11-26T10:35:57,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:57,423 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:35:57,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:57,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:57,486 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:57,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34508 deadline: 1732617417484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:57,545 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:57,548 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411267b2cbc915a244bdaa9ea923eed02daa7_d75570331a075e2a47f6b93a7b93d8ef to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411267b2cbc915a244bdaa9ea923eed02daa7_d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:35:57,549 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/fe88a830c3424271ae75faea8f0bcc05, store: [table=TestAcidGuarantees family=A region=d75570331a075e2a47f6b93a7b93d8ef] 2024-11-26T10:35:57,549 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/fe88a830c3424271ae75faea8f0bcc05 is 175, key is test_row_0/A:col10/1732617357136/Put/seqid=0 2024-11-26T10:35:57,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742334_1510 (size=39549) 2024-11-26T10:35:57,574 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:57,574 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-11-26T10:35:57,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:57,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. as already flushing 2024-11-26T10:35:57,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:57,575 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:57,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:57,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:35:57,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-26T10:35:57,726 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:57,726 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-11-26T10:35:57,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:57,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. as already flushing 2024-11-26T10:35:57,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:57,727 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:57,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:57,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:57,789 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:57,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34508 deadline: 1732617417789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:57,878 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:57,878 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-11-26T10:35:57,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:57,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. as already flushing 2024-11-26T10:35:57,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:57,879 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:35:57,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:57,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:57,952 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=57, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/fe88a830c3424271ae75faea8f0bcc05 2024-11-26T10:35:57,958 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/5fd9ff7c3aab4d4ab6fbba0b3e94b75c is 50, key is test_row_0/B:col10/1732617357136/Put/seqid=0 2024-11-26T10:35:57,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742335_1511 (size=12001) 2024-11-26T10:35:58,030 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:58,030 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-11-26T10:35:58,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:58,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. as already flushing 2024-11-26T10:35:58,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:58,031 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:35:58,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:58,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:58,182 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:58,183 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-11-26T10:35:58,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:58,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. as already flushing 2024-11-26T10:35:58,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:58,183 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:58,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:58,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:58,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-26T10:35:58,294 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:58,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34508 deadline: 1732617418293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:58,334 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:58,335 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-11-26T10:35:58,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:58,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. as already flushing 2024-11-26T10:35:58,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:58,335 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:58,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:58,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:58,361 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=57 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/5fd9ff7c3aab4d4ab6fbba0b3e94b75c 2024-11-26T10:35:58,367 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/8644a96608fd4352813611212faabae4 is 50, key is test_row_0/C:col10/1732617357136/Put/seqid=0 2024-11-26T10:35:58,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742336_1512 (size=12001) 2024-11-26T10:35:58,487 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:58,487 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-11-26T10:35:58,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:58,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. as already flushing 2024-11-26T10:35:58,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:58,487 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:58,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:58,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:58,639 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:58,639 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-11-26T10:35:58,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:58,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. as already flushing 2024-11-26T10:35:58,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:58,640 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:58,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:58,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:35:58,771 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=57 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/8644a96608fd4352813611212faabae4 2024-11-26T10:35:58,775 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/fe88a830c3424271ae75faea8f0bcc05 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/fe88a830c3424271ae75faea8f0bcc05 2024-11-26T10:35:58,777 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/fe88a830c3424271ae75faea8f0bcc05, entries=200, sequenceid=57, filesize=38.6 K 2024-11-26T10:35:58,778 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/5fd9ff7c3aab4d4ab6fbba0b3e94b75c as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/5fd9ff7c3aab4d4ab6fbba0b3e94b75c 2024-11-26T10:35:58,780 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/5fd9ff7c3aab4d4ab6fbba0b3e94b75c, entries=150, sequenceid=57, 
filesize=11.7 K 2024-11-26T10:35:58,781 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/8644a96608fd4352813611212faabae4 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/8644a96608fd4352813611212faabae4 2024-11-26T10:35:58,783 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/8644a96608fd4352813611212faabae4, entries=150, sequenceid=57, filesize=11.7 K 2024-11-26T10:35:58,784 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for d75570331a075e2a47f6b93a7b93d8ef in 1647ms, sequenceid=57, compaction requested=true 2024-11-26T10:35:58,784 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d75570331a075e2a47f6b93a7b93d8ef: 2024-11-26T10:35:58,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d75570331a075e2a47f6b93a7b93d8ef:A, priority=-2147483648, current under compaction store size is 1 2024-11-26T10:35:58,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:35:58,784 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:35:58,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d75570331a075e2a47f6b93a7b93d8ef:B, priority=-2147483648, current under compaction store size is 2 2024-11-26T10:35:58,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:35:58,784 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:35:58,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d75570331a075e2a47f6b93a7b93d8ef:C, priority=-2147483648, current under compaction store size is 3 2024-11-26T10:35:58,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:35:58,785 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:35:58,785 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101459 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:35:58,785 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): 
d75570331a075e2a47f6b93a7b93d8ef/B is initiating minor compaction (all files) 2024-11-26T10:35:58,785 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): d75570331a075e2a47f6b93a7b93d8ef/A is initiating minor compaction (all files) 2024-11-26T10:35:58,785 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d75570331a075e2a47f6b93a7b93d8ef/A in TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:58,785 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d75570331a075e2a47f6b93a7b93d8ef/B in TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:58,785 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/43a1edd4255e467083ce7f78917a5b8f, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/9302b6982bec4e5e89443c8f6e934f4d, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/5fd9ff7c3aab4d4ab6fbba0b3e94b75c] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp, totalSize=35.2 K 2024-11-26T10:35:58,785 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/dfa477d503184025a31b8ee03366e902, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/bdb7f4a78d9146e5a7fb62734bc585b8, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/fe88a830c3424271ae75faea8f0bcc05] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp, totalSize=99.1 K 2024-11-26T10:35:58,785 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:58,785 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 
files: [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/dfa477d503184025a31b8ee03366e902, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/bdb7f4a78d9146e5a7fb62734bc585b8, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/fe88a830c3424271ae75faea8f0bcc05] 2024-11-26T10:35:58,785 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 43a1edd4255e467083ce7f78917a5b8f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=19, earliestPutTs=1732617352994 2024-11-26T10:35:58,785 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting dfa477d503184025a31b8ee03366e902, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=19, earliestPutTs=1732617352994 2024-11-26T10:35:58,785 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 9302b6982bec4e5e89443c8f6e934f4d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732617353005 2024-11-26T10:35:58,785 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting bdb7f4a78d9146e5a7fb62734bc585b8, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732617353005 2024-11-26T10:35:58,785 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 5fd9ff7c3aab4d4ab6fbba0b3e94b75c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=57, earliestPutTs=1732617355129 2024-11-26T10:35:58,786 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting fe88a830c3424271ae75faea8f0bcc05, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=57, earliestPutTs=1732617355127 2024-11-26T10:35:58,790 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=d75570331a075e2a47f6b93a7b93d8ef] 2024-11-26T10:35:58,791 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:58,791 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d75570331a075e2a47f6b93a7b93d8ef#B#compaction#437 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:35:58,792 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/1283c3ea48dd436783391d1aa24b0cc5 is 50, key is test_row_0/B:col10/1732617357136/Put/seqid=0 2024-11-26T10:35:58,792 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-11-26T10:35:58,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:58,793 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2837): Flushing d75570331a075e2a47f6b93a7b93d8ef 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-26T10:35:58,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d75570331a075e2a47f6b93a7b93d8ef, store=A 2024-11-26T10:35:58,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:58,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d75570331a075e2a47f6b93a7b93d8ef, store=B 2024-11-26T10:35:58,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:58,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d75570331a075e2a47f6b93a7b93d8ef, store=C 2024-11-26T10:35:58,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:58,796 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241126adcda61131784bd4a19266bf18e3c7a1_d75570331a075e2a47f6b93a7b93d8ef store=[table=TestAcidGuarantees family=A region=d75570331a075e2a47f6b93a7b93d8ef] 2024-11-26T10:35:58,798 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241126adcda61131784bd4a19266bf18e3c7a1_d75570331a075e2a47f6b93a7b93d8ef, store=[table=TestAcidGuarantees family=A region=d75570331a075e2a47f6b93a7b93d8ef] 2024-11-26T10:35:58,798 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126adcda61131784bd4a19266bf18e3c7a1_d75570331a075e2a47f6b93a7b93d8ef because there are no MOB cells, store=[table=TestAcidGuarantees family=A 
region=d75570331a075e2a47f6b93a7b93d8ef] 2024-11-26T10:35:58,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742337_1513 (size=12104) 2024-11-26T10:35:58,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411262e8a08f4427d48a88b6563e9f044eacf_d75570331a075e2a47f6b93a7b93d8ef is 50, key is test_row_0/A:col10/1732617357174/Put/seqid=0 2024-11-26T10:35:58,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742338_1514 (size=4469) 2024-11-26T10:35:58,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742339_1515 (size=12154) 2024-11-26T10:35:58,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:35:58,838 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411262e8a08f4427d48a88b6563e9f044eacf_d75570331a075e2a47f6b93a7b93d8ef to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411262e8a08f4427d48a88b6563e9f044eacf_d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:35:58,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/2423a91096bb4de7992fbccf69edeb65, store: [table=TestAcidGuarantees family=A region=d75570331a075e2a47f6b93a7b93d8ef] 2024-11-26T10:35:58,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/2423a91096bb4de7992fbccf69edeb65 is 175, key is test_row_0/A:col10/1732617357174/Put/seqid=0 2024-11-26T10:35:58,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742340_1516 (size=30955) 2024-11-26T10:35:58,844 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=77, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/2423a91096bb4de7992fbccf69edeb65 2024-11-26T10:35:58,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/5bee5d19ab7743699ee824769e0f3cc6 is 50, key is test_row_0/B:col10/1732617357174/Put/seqid=0 2024-11-26T10:35:58,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742341_1517 (size=12001) 2024-11-26T10:35:59,219 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/1283c3ea48dd436783391d1aa24b0cc5 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/1283c3ea48dd436783391d1aa24b0cc5 2024-11-26T10:35:59,223 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d75570331a075e2a47f6b93a7b93d8ef/B of d75570331a075e2a47f6b93a7b93d8ef into 1283c3ea48dd436783391d1aa24b0cc5(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:35:59,223 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d75570331a075e2a47f6b93a7b93d8ef: 2024-11-26T10:35:59,223 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef., storeName=d75570331a075e2a47f6b93a7b93d8ef/B, priority=13, startTime=1732617358784; duration=0sec 2024-11-26T10:35:59,223 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:35:59,223 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d75570331a075e2a47f6b93a7b93d8ef:B 2024-11-26T10:35:59,223 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:35:59,224 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:35:59,224 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): d75570331a075e2a47f6b93a7b93d8ef/C is initiating minor compaction (all files) 2024-11-26T10:35:59,224 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d75570331a075e2a47f6b93a7b93d8ef/C in TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 
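The entries above show store d75570331a075e2a47f6b93a7b93d8ef/B being compacted from three HFiles into one, with a minor compaction of store C selected next. For readers following the log, here is a minimal sketch of how a flush and a per-family compaction can be requested through the public HBase 2.x Admin API; it assumes a reachable cluster and an existing table named TestAcidGuarantees with column families A, B and C (the names used in this log), and the class name FlushAndCompactSketch is hypothetical.

    // Minimal sketch, assuming an HBase 2.x client on the classpath and a
    // reachable cluster; table and family names are taken from the log above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FlushAndCompactSketch { // hypothetical class name
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Flush memstores to HFiles, producing store files like the
          // .tmp/B/... files committed earlier in this log.
          admin.flush(table);
          // Request a compaction of a single column family; the log shows
          // stores A, B and C each merging 3 HFiles into one.
          admin.compact(table, Bytes.toBytes("B"));
        }
      }
    }

Whether such a request runs as a minor or major compaction is decided server-side by the compaction policy (the SortedCompactionPolicy/ExploringCompactionPolicy selection lines above), not by the client.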
2024-11-26T10:35:59,224 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/0a84e899b3ec44bca146059214289653, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/18e7fe6407cc4101a1b0de2426535924, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/8644a96608fd4352813611212faabae4] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp, totalSize=35.2 K 2024-11-26T10:35:59,224 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 0a84e899b3ec44bca146059214289653, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=19, earliestPutTs=1732617352994 2024-11-26T10:35:59,224 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 18e7fe6407cc4101a1b0de2426535924, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732617353005 2024-11-26T10:35:59,225 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 8644a96608fd4352813611212faabae4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=57, earliestPutTs=1732617355129 2024-11-26T10:35:59,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-26T10:35:59,228 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d75570331a075e2a47f6b93a7b93d8ef#A#compaction#438 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:35:59,229 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/f250bb6da09b4dcbb6128fd08fe559f0 is 175, key is test_row_0/A:col10/1732617357136/Put/seqid=0 2024-11-26T10:35:59,231 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d75570331a075e2a47f6b93a7b93d8ef#C#compaction#441 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:35:59,231 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/b7136858b3ba4008ba59a04c2495a1ca is 50, key is test_row_0/C:col10/1732617357136/Put/seqid=0 2024-11-26T10:35:59,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742342_1518 (size=31058) 2024-11-26T10:35:59,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742343_1519 (size=12104) 2024-11-26T10:35:59,238 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/f250bb6da09b4dcbb6128fd08fe559f0 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/f250bb6da09b4dcbb6128fd08fe559f0 2024-11-26T10:35:59,241 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d75570331a075e2a47f6b93a7b93d8ef/A of d75570331a075e2a47f6b93a7b93d8ef into f250bb6da09b4dcbb6128fd08fe559f0(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:35:59,241 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d75570331a075e2a47f6b93a7b93d8ef: 2024-11-26T10:35:59,241 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef., storeName=d75570331a075e2a47f6b93a7b93d8ef/A, priority=13, startTime=1732617358784; duration=0sec 2024-11-26T10:35:59,241 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:35:59,241 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d75570331a075e2a47f6b93a7b93d8ef:A 2024-11-26T10:35:59,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:35:59,253 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 
as already flushing 2024-11-26T10:35:59,253 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/5bee5d19ab7743699ee824769e0f3cc6 2024-11-26T10:35:59,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/a330525e89824e0a956e041acbf2c058 is 50, key is test_row_0/C:col10/1732617357174/Put/seqid=0 2024-11-26T10:35:59,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742344_1520 (size=12001) 2024-11-26T10:35:59,270 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:59,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34534 deadline: 1732617419267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:59,270 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:59,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34504 deadline: 1732617419267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:59,270 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:59,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34490 deadline: 1732617419267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:59,270 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:59,270 DEBUG [Thread-2246 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4140 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef., hostname=ccf62758a0a5,45419,1732617185877, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at 
org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at 
org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-26T10:35:59,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34506 deadline: 1732617419268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, 
regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:59,298 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:59,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34508 deadline: 1732617419297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:59,372 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:59,372 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:59,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34490 deadline: 1732617419371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:59,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34504 deadline: 1732617419371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:59,373 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:59,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34506 deadline: 1732617419371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:59,575 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:59,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34506 deadline: 1732617419573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:59,575 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:59,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34490 deadline: 1732617419574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:59,575 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:59,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34504 deadline: 1732617419574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:59,638 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/b7136858b3ba4008ba59a04c2495a1ca as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/b7136858b3ba4008ba59a04c2495a1ca 2024-11-26T10:35:59,641 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d75570331a075e2a47f6b93a7b93d8ef/C of d75570331a075e2a47f6b93a7b93d8ef into b7136858b3ba4008ba59a04c2495a1ca(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-26T10:35:59,641 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d75570331a075e2a47f6b93a7b93d8ef: 2024-11-26T10:35:59,641 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef., storeName=d75570331a075e2a47f6b93a7b93d8ef/C, priority=13, startTime=1732617358784; duration=0sec 2024-11-26T10:35:59,642 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:35:59,642 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d75570331a075e2a47f6b93a7b93d8ef:C 2024-11-26T10:35:59,663 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/a330525e89824e0a956e041acbf2c058 2024-11-26T10:35:59,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/2423a91096bb4de7992fbccf69edeb65 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/2423a91096bb4de7992fbccf69edeb65 2024-11-26T10:35:59,669 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/2423a91096bb4de7992fbccf69edeb65, entries=150, sequenceid=77, filesize=30.2 K 2024-11-26T10:35:59,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/5bee5d19ab7743699ee824769e0f3cc6 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/5bee5d19ab7743699ee824769e0f3cc6 2024-11-26T10:35:59,672 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/5bee5d19ab7743699ee824769e0f3cc6, entries=150, sequenceid=77, filesize=11.7 K 2024-11-26T10:35:59,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/a330525e89824e0a956e041acbf2c058 
as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/a330525e89824e0a956e041acbf2c058 2024-11-26T10:35:59,675 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/a330525e89824e0a956e041acbf2c058, entries=150, sequenceid=77, filesize=11.7 K 2024-11-26T10:35:59,675 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for d75570331a075e2a47f6b93a7b93d8ef in 882ms, sequenceid=77, compaction requested=false 2024-11-26T10:35:59,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2538): Flush status journal for d75570331a075e2a47f6b93a7b93d8ef: 2024-11-26T10:35:59,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:35:59,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=156 2024-11-26T10:35:59,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4106): Remote procedure done, pid=156 2024-11-26T10:35:59,677 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=156, resume processing ppid=155 2024-11-26T10:35:59,677 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, ppid=155, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5570 sec 2024-11-26T10:35:59,678 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees in 2.5900 sec 2024-11-26T10:35:59,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:35:59,877 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d75570331a075e2a47f6b93a7b93d8ef 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-26T10:35:59,878 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d75570331a075e2a47f6b93a7b93d8ef, store=A 2024-11-26T10:35:59,878 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:59,878 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d75570331a075e2a47f6b93a7b93d8ef, store=B 2024-11-26T10:35:59,878 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:35:59,878 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d75570331a075e2a47f6b93a7b93d8ef, store=C 2024-11-26T10:35:59,878 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-11-26T10:35:59,883 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112643a6e6ab8da84d5cb7601d31276cca95_d75570331a075e2a47f6b93a7b93d8ef is 50, key is test_row_0/A:col10/1732617359266/Put/seqid=0 2024-11-26T10:35:59,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742345_1521 (size=14594) 2024-11-26T10:35:59,929 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:59,929 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:59,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34490 deadline: 1732617419925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:59,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34504 deadline: 1732617419925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:35:59,929 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:35:59,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34506 deadline: 1732617419925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:00,031 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:00,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34504 deadline: 1732617420030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:00,031 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:00,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34490 deadline: 1732617420030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:00,032 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:00,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34506 deadline: 1732617420030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:00,233 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:00,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34504 deadline: 1732617420232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:00,233 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:00,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34490 deadline: 1732617420232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:00,234 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:00,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34506 deadline: 1732617420232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:00,286 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:00,289 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112643a6e6ab8da84d5cb7601d31276cca95_d75570331a075e2a47f6b93a7b93d8ef to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112643a6e6ab8da84d5cb7601d31276cca95_d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:36:00,289 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/ae06bd77d73c4002844d58007cfaadd4, store: [table=TestAcidGuarantees family=A region=d75570331a075e2a47f6b93a7b93d8ef] 2024-11-26T10:36:00,290 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/ae06bd77d73c4002844d58007cfaadd4 is 175, key is test_row_0/A:col10/1732617359266/Put/seqid=0 2024-11-26T10:36:00,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742346_1522 (size=39549) 2024-11-26T10:36:00,537 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:00,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34490 deadline: 1732617420535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:00,537 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:00,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34504 deadline: 1732617420536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:00,538 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:00,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34506 deadline: 1732617420536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:00,702 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=98, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/ae06bd77d73c4002844d58007cfaadd4 2024-11-26T10:36:00,707 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/05a13d1c87064c8686af7304e9028d7b is 50, key is test_row_0/B:col10/1732617359266/Put/seqid=0 2024-11-26T10:36:00,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742347_1523 (size=12001) 2024-11-26T10:36:01,039 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:01,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34504 deadline: 1732617421038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:01,040 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:01,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34490 deadline: 1732617421039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:01,041 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:01,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34506 deadline: 1732617421039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:01,112 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=98 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/05a13d1c87064c8686af7304e9028d7b 2024-11-26T10:36:01,117 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/a196fcfc64f94d2a8843070dddc32270 is 50, key is test_row_0/C:col10/1732617359266/Put/seqid=0 2024-11-26T10:36:01,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742348_1524 (size=12001) 2024-11-26T10:36:01,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-26T10:36:01,229 INFO [Thread-2254 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 155 completed 2024-11-26T10:36:01,229 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-26T10:36:01,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=157, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees 2024-11-26T10:36:01,230 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=157, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-26T10:36:01,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-26T10:36:01,231 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=157, 
state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-26T10:36:01,231 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=158, ppid=157, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-26T10:36:01,310 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:01,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34508 deadline: 1732617421308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:01,310 DEBUG [Thread-2250 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4131 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at 
region=TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef., hostname=ccf62758a0a5,45419,1732617185877, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-26T10:36:01,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-26T10:36:01,381 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:01,381 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-26T10:36:01,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:36:01,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. as already flushing 2024-11-26T10:36:01,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:36:01,382 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
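
The retry sequence just logged (tries=6, retries=16, with the RegionTooBusyException unwrapped by RpcRetryingCallerImpl) is handled entirely inside the HBase client; the writer thread in the trace simply calls HTable.put on table TestAcidGuarantees, row test_row_0. A minimal sketch of an equivalent standalone writer follows, assuming connection details come from the classpath configuration; the retries value mirrors the log, while the pause and cell value are placeholders, not this test's actual settings.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionWriter {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // Retry budget used by RpcRetryingCallerImpl when the server responds
        // with RegionTooBusyException; 16 matches "retries=16" in the log,
        // the pause is illustrative only.
        conf.setInt("hbase.client.retries.number", 16);
        conf.setLong("hbase.client.pause", 100);
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          // Blocks inside the client retry loop; an IOException only surfaces
          // once all retries against the busy region are exhausted.
          table.put(put);
        }
      }
    }
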
2024-11-26T10:36:01,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:36:01,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:36:01,520 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=98 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/a196fcfc64f94d2a8843070dddc32270 2024-11-26T10:36:01,523 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/ae06bd77d73c4002844d58007cfaadd4 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/ae06bd77d73c4002844d58007cfaadd4 2024-11-26T10:36:01,526 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/ae06bd77d73c4002844d58007cfaadd4, entries=200, sequenceid=98, filesize=38.6 K 2024-11-26T10:36:01,526 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/05a13d1c87064c8686af7304e9028d7b as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/05a13d1c87064c8686af7304e9028d7b 2024-11-26T10:36:01,529 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/05a13d1c87064c8686af7304e9028d7b, entries=150, sequenceid=98, filesize=11.7 K 2024-11-26T10:36:01,530 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/a196fcfc64f94d2a8843070dddc32270 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/a196fcfc64f94d2a8843070dddc32270 2024-11-26T10:36:01,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-26T10:36:01,532 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/a196fcfc64f94d2a8843070dddc32270, entries=150, sequenceid=98, filesize=11.7 K 2024-11-26T10:36:01,533 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=114.05 KB/116790 for d75570331a075e2a47f6b93a7b93d8ef in 1656ms, sequenceid=98, compaction requested=true 2024-11-26T10:36:01,533 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
d75570331a075e2a47f6b93a7b93d8ef: 2024-11-26T10:36:01,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d75570331a075e2a47f6b93a7b93d8ef:A, priority=-2147483648, current under compaction store size is 1 2024-11-26T10:36:01,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:36:01,533 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:36:01,533 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:01,533 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:36:01,533 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-26T10:36:01,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:36:01,534 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2837): Flushing d75570331a075e2a47f6b93a7b93d8ef 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-26T10:36:01,534 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101562 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:36:01,534 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): d75570331a075e2a47f6b93a7b93d8ef/A is initiating minor compaction (all files) 2024-11-26T10:36:01,534 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d75570331a075e2a47f6b93a7b93d8ef/A in TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 
2024-11-26T10:36:01,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d75570331a075e2a47f6b93a7b93d8ef, store=A 2024-11-26T10:36:01,534 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/f250bb6da09b4dcbb6128fd08fe559f0, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/2423a91096bb4de7992fbccf69edeb65, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/ae06bd77d73c4002844d58007cfaadd4] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp, totalSize=99.2 K 2024-11-26T10:36:01,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:36:01,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d75570331a075e2a47f6b93a7b93d8ef, store=B 2024-11-26T10:36:01,534 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:36:01,534 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 
files: [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/f250bb6da09b4dcbb6128fd08fe559f0, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/2423a91096bb4de7992fbccf69edeb65, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/ae06bd77d73c4002844d58007cfaadd4] 2024-11-26T10:36:01,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:36:01,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d75570331a075e2a47f6b93a7b93d8ef, store=C 2024-11-26T10:36:01,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:36:01,534 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d75570331a075e2a47f6b93a7b93d8ef:B, priority=-2147483648, current under compaction store size is 2 2024-11-26T10:36:01,534 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:36:01,534 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d75570331a075e2a47f6b93a7b93d8ef:C, priority=-2147483648, current under compaction store size is 3 2024-11-26T10:36:01,534 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:36:01,534 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting f250bb6da09b4dcbb6128fd08fe559f0, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=57, earliestPutTs=1732617355129 2024-11-26T10:36:01,535 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:36:01,535 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): d75570331a075e2a47f6b93a7b93d8ef/B is initiating minor compaction (all files) 2024-11-26T10:36:01,535 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d75570331a075e2a47f6b93a7b93d8ef/B in TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 
2024-11-26T10:36:01,535 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2423a91096bb4de7992fbccf69edeb65, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732617357171 2024-11-26T10:36:01,535 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/1283c3ea48dd436783391d1aa24b0cc5, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/5bee5d19ab7743699ee824769e0f3cc6, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/05a13d1c87064c8686af7304e9028d7b] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp, totalSize=35.3 K 2024-11-26T10:36:01,535 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting ae06bd77d73c4002844d58007cfaadd4, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=98, earliestPutTs=1732617359266 2024-11-26T10:36:01,535 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 1283c3ea48dd436783391d1aa24b0cc5, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=57, earliestPutTs=1732617355129 2024-11-26T10:36:01,535 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 5bee5d19ab7743699ee824769e0f3cc6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732617357171 2024-11-26T10:36:01,535 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 05a13d1c87064c8686af7304e9028d7b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=98, earliestPutTs=1732617359266 2024-11-26T10:36:01,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411266175816aa9ea4b139c54fc29ad855680_d75570331a075e2a47f6b93a7b93d8ef is 50, key is test_row_0/A:col10/1732617359920/Put/seqid=0 2024-11-26T10:36:01,539 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=d75570331a075e2a47f6b93a7b93d8ef] 2024-11-26T10:36:01,541 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241126ddb3b11beed14792a95684c28298a7fe_d75570331a075e2a47f6b93a7b93d8ef store=[table=TestAcidGuarantees family=A region=d75570331a075e2a47f6b93a7b93d8ef] 2024-11-26T10:36:01,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742349_1525 (size=12154) 2024-11-26T10:36:01,542 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
d75570331a075e2a47f6b93a7b93d8ef#B#compaction#448 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:36:01,542 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/d5af0ec9e6bd447682d99e944967be39 is 50, key is test_row_0/B:col10/1732617359266/Put/seqid=0 2024-11-26T10:36:01,542 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241126ddb3b11beed14792a95684c28298a7fe_d75570331a075e2a47f6b93a7b93d8ef, store=[table=TestAcidGuarantees family=A region=d75570331a075e2a47f6b93a7b93d8ef] 2024-11-26T10:36:01,542 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126ddb3b11beed14792a95684c28298a7fe_d75570331a075e2a47f6b93a7b93d8ef because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=d75570331a075e2a47f6b93a7b93d8ef] 2024-11-26T10:36:01,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742350_1526 (size=12207) 2024-11-26T10:36:01,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742351_1527 (size=4469) 2024-11-26T10:36:01,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-26T10:36:01,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:01,945 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411266175816aa9ea4b139c54fc29ad855680_d75570331a075e2a47f6b93a7b93d8ef to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411266175816aa9ea4b139c54fc29ad855680_d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:36:01,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/c4bfa07f110045498b974c336fdb7fe8, store: [table=TestAcidGuarantees family=A region=d75570331a075e2a47f6b93a7b93d8ef] 2024-11-26T10:36:01,946 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d75570331a075e2a47f6b93a7b93d8ef#A#compaction#447 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:36:01,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/c4bfa07f110045498b974c336fdb7fe8 is 175, key is test_row_0/A:col10/1732617359920/Put/seqid=0 2024-11-26T10:36:01,946 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/d7ca4afe7ae646d58665e0602cfd61c8 is 175, key is test_row_0/A:col10/1732617359266/Put/seqid=0 2024-11-26T10:36:01,949 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/d5af0ec9e6bd447682d99e944967be39 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/d5af0ec9e6bd447682d99e944967be39 2024-11-26T10:36:01,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742353_1529 (size=31161) 2024-11-26T10:36:01,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742352_1528 (size=30955) 2024-11-26T10:36:01,953 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d75570331a075e2a47f6b93a7b93d8ef/B of d75570331a075e2a47f6b93a7b93d8ef into d5af0ec9e6bd447682d99e944967be39(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
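
The minor compactions selected above ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking") are driven by store-file-count thresholds; the sketch below names the keys involved, with values that mirror the figures reported in the log (whether this test overrides the defaults is not shown here, so treat the numbers as illustrative).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionSelectionSettings {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Minimum eligible store files before a minor compaction is selected;
        // the log shows selection kicking in at 3 files per store.
        conf.setInt("hbase.hstore.compaction.min", 3);
        // Upper bound on the number of files merged in one minor compaction.
        conf.setInt("hbase.hstore.compaction.max", 10);
        // Store-file count at which further flushes are blocked; matches the
        // "16 blocking" figure reported by SortedCompactionPolicy above.
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        System.out.println("compaction.min=" + conf.getInt("hbase.hstore.compaction.min", -1));
      }
    }
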
2024-11-26T10:36:01,953 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d75570331a075e2a47f6b93a7b93d8ef: 2024-11-26T10:36:01,953 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef., storeName=d75570331a075e2a47f6b93a7b93d8ef/B, priority=13, startTime=1732617361533; duration=0sec 2024-11-26T10:36:01,953 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:36:01,953 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d75570331a075e2a47f6b93a7b93d8ef:B 2024-11-26T10:36:01,953 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:36:01,954 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:36:01,954 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): d75570331a075e2a47f6b93a7b93d8ef/C is initiating minor compaction (all files) 2024-11-26T10:36:01,954 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d75570331a075e2a47f6b93a7b93d8ef/C in TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:36:01,954 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/b7136858b3ba4008ba59a04c2495a1ca, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/a330525e89824e0a956e041acbf2c058, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/a196fcfc64f94d2a8843070dddc32270] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp, totalSize=35.3 K 2024-11-26T10:36:01,954 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting b7136858b3ba4008ba59a04c2495a1ca, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=57, earliestPutTs=1732617355129 2024-11-26T10:36:01,954 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting a330525e89824e0a956e041acbf2c058, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732617357171 2024-11-26T10:36:01,955 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting a196fcfc64f94d2a8843070dddc32270, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=98, earliestPutTs=1732617359266 2024-11-26T10:36:01,959 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
d75570331a075e2a47f6b93a7b93d8ef#C#compaction#449 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:36:01,959 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/84a4a975b3ff475f857b2c159184b2ff is 50, key is test_row_0/C:col10/1732617359266/Put/seqid=0 2024-11-26T10:36:01,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742354_1530 (size=12207) 2024-11-26T10:36:02,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:36:02,047 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. as already flushing 2024-11-26T10:36:02,061 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:02,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34504 deadline: 1732617422059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:02,062 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:02,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34490 deadline: 1732617422059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:02,064 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:02,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34506 deadline: 1732617422062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:02,164 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:02,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34504 deadline: 1732617422163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:02,165 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:02,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34490 deadline: 1732617422163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:02,167 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:02,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34506 deadline: 1732617422165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:02,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-26T10:36:02,350 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=118, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/c4bfa07f110045498b974c336fdb7fe8 2024-11-26T10:36:02,352 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/d7ca4afe7ae646d58665e0602cfd61c8 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/d7ca4afe7ae646d58665e0602cfd61c8 2024-11-26T10:36:02,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/24825155c5ae4205b95b931386fe5502 is 50, key is test_row_0/B:col10/1732617359920/Put/seqid=0 2024-11-26T10:36:02,355 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d75570331a075e2a47f6b93a7b93d8ef/A of d75570331a075e2a47f6b93a7b93d8ef into d7ca4afe7ae646d58665e0602cfd61c8(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-26T10:36:02,355 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d75570331a075e2a47f6b93a7b93d8ef: 2024-11-26T10:36:02,355 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef., storeName=d75570331a075e2a47f6b93a7b93d8ef/A, priority=13, startTime=1732617361533; duration=0sec 2024-11-26T10:36:02,356 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:36:02,356 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d75570331a075e2a47f6b93a7b93d8ef:A 2024-11-26T10:36:02,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742355_1531 (size=12001) 2024-11-26T10:36:02,357 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/24825155c5ae4205b95b931386fe5502 2024-11-26T10:36:02,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/f0d912d26fef4f5494ad26c4e88f3080 is 50, key is test_row_0/C:col10/1732617359920/Put/seqid=0 2024-11-26T10:36:02,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742356_1532 (size=12001) 2024-11-26T10:36:02,365 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/84a4a975b3ff475f857b2c159184b2ff as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/84a4a975b3ff475f857b2c159184b2ff 2024-11-26T10:36:02,368 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:02,368 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:02,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34490 deadline: 1732617422366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:02,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34504 deadline: 1732617422366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:02,369 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d75570331a075e2a47f6b93a7b93d8ef/C of d75570331a075e2a47f6b93a7b93d8ef into 84a4a975b3ff475f857b2c159184b2ff(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-26T10:36:02,369 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d75570331a075e2a47f6b93a7b93d8ef: 2024-11-26T10:36:02,369 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef., storeName=d75570331a075e2a47f6b93a7b93d8ef/C, priority=13, startTime=1732617361534; duration=0sec 2024-11-26T10:36:02,369 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:36:02,369 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d75570331a075e2a47f6b93a7b93d8ef:C 2024-11-26T10:36:02,371 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:02,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34506 deadline: 1732617422369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:02,671 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:02,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34490 deadline: 1732617422670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:02,673 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:02,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34504 deadline: 1732617422671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:02,675 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:02,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34506 deadline: 1732617422673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:02,765 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/f0d912d26fef4f5494ad26c4e88f3080 2024-11-26T10:36:02,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/c4bfa07f110045498b974c336fdb7fe8 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/c4bfa07f110045498b974c336fdb7fe8 2024-11-26T10:36:02,770 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/c4bfa07f110045498b974c336fdb7fe8, entries=150, sequenceid=118, filesize=30.2 K 2024-11-26T10:36:02,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/24825155c5ae4205b95b931386fe5502 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/24825155c5ae4205b95b931386fe5502 2024-11-26T10:36:02,773 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/24825155c5ae4205b95b931386fe5502, entries=150, sequenceid=118, filesize=11.7 K 2024-11-26T10:36:02,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/f0d912d26fef4f5494ad26c4e88f3080 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/f0d912d26fef4f5494ad26c4e88f3080 2024-11-26T10:36:02,776 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/f0d912d26fef4f5494ad26c4e88f3080, entries=150, sequenceid=118, filesize=11.7 K 2024-11-26T10:36:02,776 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for d75570331a075e2a47f6b93a7b93d8ef in 1242ms, sequenceid=118, compaction requested=false 2024-11-26T10:36:02,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2538): Flush status journal for d75570331a075e2a47f6b93a7b93d8ef: 2024-11-26T10:36:02,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 
2024-11-26T10:36:02,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=158 2024-11-26T10:36:02,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4106): Remote procedure done, pid=158 2024-11-26T10:36:02,778 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=158, resume processing ppid=157 2024-11-26T10:36:02,778 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, ppid=157, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5460 sec 2024-11-26T10:36:02,778 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees in 1.5490 sec 2024-11-26T10:36:03,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:36:03,174 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d75570331a075e2a47f6b93a7b93d8ef 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-26T10:36:03,174 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d75570331a075e2a47f6b93a7b93d8ef, store=A 2024-11-26T10:36:03,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:36:03,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d75570331a075e2a47f6b93a7b93d8ef, store=B 2024-11-26T10:36:03,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:36:03,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d75570331a075e2a47f6b93a7b93d8ef, store=C 2024-11-26T10:36:03,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:36:03,179 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126a28b82377f724a1b95f6f11a6e1b18d9_d75570331a075e2a47f6b93a7b93d8ef is 50, key is test_row_0/A:col10/1732617363173/Put/seqid=0 2024-11-26T10:36:03,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742357_1533 (size=14744) 2024-11-26T10:36:03,224 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:03,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34490 deadline: 1732617423221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:03,228 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:03,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34506 deadline: 1732617423225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:03,228 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:03,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34504 deadline: 1732617423225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:03,301 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:03,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34534 deadline: 1732617423299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:03,301 DEBUG [Thread-2246 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8171 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef., hostname=ccf62758a0a5,45419,1732617185877, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-26T10:36:03,327 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:03,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34490 deadline: 1732617423325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:03,331 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:03,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34506 deadline: 1732617423329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:03,331 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:03,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34504 deadline: 1732617423329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:03,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-26T10:36:03,334 INFO [Thread-2254 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 157 completed 2024-11-26T10:36:03,334 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-26T10:36:03,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=159, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees 2024-11-26T10:36:03,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-26T10:36:03,335 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=159, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-26T10:36:03,336 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=159, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-26T10:36:03,336 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-26T10:36:03,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see 
if procedure is done pid=159 2024-11-26T10:36:03,487 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:03,487 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-26T10:36:03,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:36:03,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. as already flushing 2024-11-26T10:36:03,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:36:03,487 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:36:03,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:36:03,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:36:03,531 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:03,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34490 deadline: 1732617423529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:03,534 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:03,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34506 deadline: 1732617423531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:03,534 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:03,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34504 deadline: 1732617423532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:03,585 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:03,587 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126a28b82377f724a1b95f6f11a6e1b18d9_d75570331a075e2a47f6b93a7b93d8ef to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126a28b82377f724a1b95f6f11a6e1b18d9_d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:36:03,588 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/3db3ed434d614145bee6a92a29a21979, store: [table=TestAcidGuarantees family=A region=d75570331a075e2a47f6b93a7b93d8ef] 2024-11-26T10:36:03,588 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/3db3ed434d614145bee6a92a29a21979 is 175, key is test_row_0/A:col10/1732617363173/Put/seqid=0 2024-11-26T10:36:03,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742358_1534 (size=39699) 2024-11-26T10:36:03,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-26T10:36:03,639 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:03,639 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-26T10:36:03,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:36:03,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. as already flushing 2024-11-26T10:36:03,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:36:03,640 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:36:03,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:36:03,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:36:03,791 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:03,791 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-26T10:36:03,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:36:03,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 
as already flushing 2024-11-26T10:36:03,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:36:03,792 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:36:03,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:36:03,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:36:03,833 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:03,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34490 deadline: 1732617423832, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:03,839 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:03,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34506 deadline: 1732617423837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:03,839 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:03,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34504 deadline: 1732617423837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:03,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-26T10:36:03,943 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:03,943 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-26T10:36:03,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:36:03,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. as already flushing 2024-11-26T10:36:03,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:36:03,944 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:36:03,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:36:03,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:36:03,992 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=138, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/3db3ed434d614145bee6a92a29a21979 2024-11-26T10:36:03,997 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/c0d486e68530410e85170092d9b5a851 is 50, key is test_row_0/B:col10/1732617363173/Put/seqid=0 2024-11-26T10:36:03,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742359_1535 (size=12101) 2024-11-26T10:36:04,000 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=138 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/c0d486e68530410e85170092d9b5a851 2024-11-26T10:36:04,005 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/934382791ad54ef39561eece2c2dbbe3 is 50, key is test_row_0/C:col10/1732617363173/Put/seqid=0 2024-11-26T10:36:04,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742360_1536 (size=12101) 2024-11-26T10:36:04,095 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:04,096 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-26T10:36:04,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:36:04,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 
as already flushing 2024-11-26T10:36:04,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:36:04,096 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:36:04,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:36:04,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:36:04,248 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:04,248 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-26T10:36:04,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:36:04,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. as already flushing 2024-11-26T10:36:04,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:36:04,248 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:36:04,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:36:04,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:36:04,337 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:04,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34490 deadline: 1732617424336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:04,344 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-26T10:36:04,344 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:04,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34506 deadline: 1732617424342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:04,345 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:04,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34504 deadline: 1732617424344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:04,400 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:04,400 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-26T10:36:04,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:36:04,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. as already flushing 2024-11-26T10:36:04,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:36:04,400 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:36:04,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:36:04,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:36:04,408 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=138 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/934382791ad54ef39561eece2c2dbbe3 2024-11-26T10:36:04,411 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/3db3ed434d614145bee6a92a29a21979 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/3db3ed434d614145bee6a92a29a21979 2024-11-26T10:36:04,414 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/3db3ed434d614145bee6a92a29a21979, entries=200, sequenceid=138, filesize=38.8 K 2024-11-26T10:36:04,414 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/c0d486e68530410e85170092d9b5a851 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/c0d486e68530410e85170092d9b5a851 2024-11-26T10:36:04,417 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/c0d486e68530410e85170092d9b5a851, entries=150, sequenceid=138, filesize=11.8 K 2024-11-26T10:36:04,417 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/934382791ad54ef39561eece2c2dbbe3 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/934382791ad54ef39561eece2c2dbbe3 2024-11-26T10:36:04,420 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/934382791ad54ef39561eece2c2dbbe3, entries=150, sequenceid=138, filesize=11.8 K 2024-11-26T10:36:04,421 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=114.05 KB/116790 for d75570331a075e2a47f6b93a7b93d8ef in 1246ms, sequenceid=138, compaction requested=true 2024-11-26T10:36:04,421 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d75570331a075e2a47f6b93a7b93d8ef: 2024-11-26T10:36:04,421 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d75570331a075e2a47f6b93a7b93d8ef:A, priority=-2147483648, current under compaction store size is 1 2024-11-26T10:36:04,421 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:36:04,421 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:36:04,421 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:36:04,421 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d75570331a075e2a47f6b93a7b93d8ef:B, priority=-2147483648, current under compaction store size is 2 2024-11-26T10:36:04,421 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:36:04,421 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d75570331a075e2a47f6b93a7b93d8ef:C, priority=-2147483648, current under compaction store size is 3 2024-11-26T10:36:04,421 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:36:04,422 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101815 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:36:04,422 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:36:04,423 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): d75570331a075e2a47f6b93a7b93d8ef/B is initiating minor compaction (all files) 2024-11-26T10:36:04,423 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): d75570331a075e2a47f6b93a7b93d8ef/A is initiating minor compaction (all files) 2024-11-26T10:36:04,423 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d75570331a075e2a47f6b93a7b93d8ef/B in TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:36:04,423 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d75570331a075e2a47f6b93a7b93d8ef/A in TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 
2024-11-26T10:36:04,423 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/d5af0ec9e6bd447682d99e944967be39, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/24825155c5ae4205b95b931386fe5502, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/c0d486e68530410e85170092d9b5a851] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp, totalSize=35.5 K 2024-11-26T10:36:04,423 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/d7ca4afe7ae646d58665e0602cfd61c8, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/c4bfa07f110045498b974c336fdb7fe8, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/3db3ed434d614145bee6a92a29a21979] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp, totalSize=99.4 K 2024-11-26T10:36:04,423 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:36:04,423 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 
files: [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/d7ca4afe7ae646d58665e0602cfd61c8, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/c4bfa07f110045498b974c336fdb7fe8, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/3db3ed434d614145bee6a92a29a21979] 2024-11-26T10:36:04,423 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting d5af0ec9e6bd447682d99e944967be39, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=98, earliestPutTs=1732617359266 2024-11-26T10:36:04,423 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting d7ca4afe7ae646d58665e0602cfd61c8, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=98, earliestPutTs=1732617359266 2024-11-26T10:36:04,423 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 24825155c5ae4205b95b931386fe5502, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732617359920 2024-11-26T10:36:04,423 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting c4bfa07f110045498b974c336fdb7fe8, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732617359920 2024-11-26T10:36:04,423 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting c0d486e68530410e85170092d9b5a851, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=138, earliestPutTs=1732617362052 2024-11-26T10:36:04,423 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3db3ed434d614145bee6a92a29a21979, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=138, earliestPutTs=1732617362052 2024-11-26T10:36:04,427 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=d75570331a075e2a47f6b93a7b93d8ef] 2024-11-26T10:36:04,428 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d75570331a075e2a47f6b93a7b93d8ef#B#compaction#456 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:36:04,429 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/b2d8a0524edc4c1dbf627fa8873a8ec5 is 50, key is test_row_0/B:col10/1732617363173/Put/seqid=0 2024-11-26T10:36:04,430 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411269994544a9abc4f91a527ba9355e691a0_d75570331a075e2a47f6b93a7b93d8ef store=[table=TestAcidGuarantees family=A region=d75570331a075e2a47f6b93a7b93d8ef] 2024-11-26T10:36:04,432 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411269994544a9abc4f91a527ba9355e691a0_d75570331a075e2a47f6b93a7b93d8ef, store=[table=TestAcidGuarantees family=A region=d75570331a075e2a47f6b93a7b93d8ef] 2024-11-26T10:36:04,432 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411269994544a9abc4f91a527ba9355e691a0_d75570331a075e2a47f6b93a7b93d8ef because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=d75570331a075e2a47f6b93a7b93d8ef] 2024-11-26T10:36:04,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742361_1537 (size=12409) 2024-11-26T10:36:04,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-26T10:36:04,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742362_1538 (size=4469) 2024-11-26T10:36:04,552 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:04,552 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-26T10:36:04,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 
2024-11-26T10:36:04,553 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2837): Flushing d75570331a075e2a47f6b93a7b93d8ef 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-26T10:36:04,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d75570331a075e2a47f6b93a7b93d8ef, store=A 2024-11-26T10:36:04,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:36:04,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d75570331a075e2a47f6b93a7b93d8ef, store=B 2024-11-26T10:36:04,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:36:04,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d75570331a075e2a47f6b93a7b93d8ef, store=C 2024-11-26T10:36:04,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:36:04,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126801b1f6d967642b6af925d6e0a324a22_d75570331a075e2a47f6b93a7b93d8ef is 50, key is test_row_0/A:col10/1732617363224/Put/seqid=0 2024-11-26T10:36:04,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742363_1539 (size=12304) 2024-11-26T10:36:04,839 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/b2d8a0524edc4c1dbf627fa8873a8ec5 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/b2d8a0524edc4c1dbf627fa8873a8ec5 2024-11-26T10:36:04,843 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d75570331a075e2a47f6b93a7b93d8ef#A#compaction#455 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:36:04,843 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d75570331a075e2a47f6b93a7b93d8ef/B of d75570331a075e2a47f6b93a7b93d8ef into b2d8a0524edc4c1dbf627fa8873a8ec5(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-26T10:36:04,843 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d75570331a075e2a47f6b93a7b93d8ef: 2024-11-26T10:36:04,843 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef., storeName=d75570331a075e2a47f6b93a7b93d8ef/B, priority=13, startTime=1732617364421; duration=0sec 2024-11-26T10:36:04,843 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:36:04,843 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d75570331a075e2a47f6b93a7b93d8ef:B 2024-11-26T10:36:04,843 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:36:04,844 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/e0877ba7e32b4dc5aea5d8bddecf7519 is 175, key is test_row_0/A:col10/1732617363173/Put/seqid=0 2024-11-26T10:36:04,847 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:36:04,847 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): d75570331a075e2a47f6b93a7b93d8ef/C is initiating minor compaction (all files) 2024-11-26T10:36:04,848 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d75570331a075e2a47f6b93a7b93d8ef/C in TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 
2024-11-26T10:36:04,848 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/84a4a975b3ff475f857b2c159184b2ff, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/f0d912d26fef4f5494ad26c4e88f3080, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/934382791ad54ef39561eece2c2dbbe3] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp, totalSize=35.5 K 2024-11-26T10:36:04,849 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 84a4a975b3ff475f857b2c159184b2ff, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=98, earliestPutTs=1732617359266 2024-11-26T10:36:04,849 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting f0d912d26fef4f5494ad26c4e88f3080, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732617359920 2024-11-26T10:36:04,849 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 934382791ad54ef39561eece2c2dbbe3, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=138, earliestPutTs=1732617362052 2024-11-26T10:36:04,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742364_1540 (size=31363) 2024-11-26T10:36:04,853 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/e0877ba7e32b4dc5aea5d8bddecf7519 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/e0877ba7e32b4dc5aea5d8bddecf7519 2024-11-26T10:36:04,856 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d75570331a075e2a47f6b93a7b93d8ef#C#compaction#458 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:36:04,856 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/a6ffafb917964ad99ffc380da04e750b is 50, key is test_row_0/C:col10/1732617363173/Put/seqid=0 2024-11-26T10:36:04,858 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d75570331a075e2a47f6b93a7b93d8ef/A of d75570331a075e2a47f6b93a7b93d8ef into e0877ba7e32b4dc5aea5d8bddecf7519(size=30.6 K), total size for store is 30.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-26T10:36:04,858 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d75570331a075e2a47f6b93a7b93d8ef: 2024-11-26T10:36:04,858 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef., storeName=d75570331a075e2a47f6b93a7b93d8ef/A, priority=13, startTime=1732617364421; duration=0sec 2024-11-26T10:36:04,858 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:36:04,858 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d75570331a075e2a47f6b93a7b93d8ef:A 2024-11-26T10:36:04,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742365_1541 (size=12409) 2024-11-26T10:36:04,863 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/a6ffafb917964ad99ffc380da04e750b as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/a6ffafb917964ad99ffc380da04e750b 2024-11-26T10:36:04,866 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d75570331a075e2a47f6b93a7b93d8ef/C of d75570331a075e2a47f6b93a7b93d8ef into a6ffafb917964ad99ffc380da04e750b(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-26T10:36:04,866 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d75570331a075e2a47f6b93a7b93d8ef: 2024-11-26T10:36:04,866 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef., storeName=d75570331a075e2a47f6b93a7b93d8ef/C, priority=13, startTime=1732617364421; duration=0sec 2024-11-26T10:36:04,866 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:36:04,866 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d75570331a075e2a47f6b93a7b93d8ef:C 2024-11-26T10:36:04,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:04,965 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126801b1f6d967642b6af925d6e0a324a22_d75570331a075e2a47f6b93a7b93d8ef to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126801b1f6d967642b6af925d6e0a324a22_d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:36:04,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/c36e341168b749c8ab1026b9c2671d97, store: [table=TestAcidGuarantees family=A region=d75570331a075e2a47f6b93a7b93d8ef] 2024-11-26T10:36:04,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/c36e341168b749c8ab1026b9c2671d97 is 175, key is test_row_0/A:col10/1732617363224/Put/seqid=0 2024-11-26T10:36:04,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742366_1542 (size=31105) 2024-11-26T10:36:05,340 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. as already flushing 2024-11-26T10:36:05,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:36:05,354 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:05,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34506 deadline: 1732617425352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:05,356 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:05,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34490 deadline: 1732617425353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:05,356 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:05,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34504 deadline: 1732617425354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:05,356 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:05,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34508 deadline: 1732617425354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:05,369 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=158, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/c36e341168b749c8ab1026b9c2671d97 2024-11-26T10:36:05,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/02acbfb445e449cd84125ab9d4407ddc is 50, key is test_row_0/B:col10/1732617363224/Put/seqid=0 2024-11-26T10:36:05,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742367_1543 (size=12151) 2024-11-26T10:36:05,377 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/02acbfb445e449cd84125ab9d4407ddc 2024-11-26T10:36:05,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/3eeeaadf0573427baf4940f339865a38 is 50, key is test_row_0/C:col10/1732617363224/Put/seqid=0 2024-11-26T10:36:05,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742368_1544 (size=12151) 2024-11-26T10:36:05,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to 
see if procedure is done pid=159 2024-11-26T10:36:05,456 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:05,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34506 deadline: 1732617425455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:05,458 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:05,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34504 deadline: 1732617425457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:05,458 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:05,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34508 deadline: 1732617425457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:05,459 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:05,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34490 deadline: 1732617425458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:05,659 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:05,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34506 deadline: 1732617425657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:05,660 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:05,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34504 deadline: 1732617425659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:05,661 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:05,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34508 deadline: 1732617425659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:05,661 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:05,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34490 deadline: 1732617425660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:05,784 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/3eeeaadf0573427baf4940f339865a38 2024-11-26T10:36:05,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/c36e341168b749c8ab1026b9c2671d97 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/c36e341168b749c8ab1026b9c2671d97 2024-11-26T10:36:05,790 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/c36e341168b749c8ab1026b9c2671d97, entries=150, sequenceid=158, filesize=30.4 K 2024-11-26T10:36:05,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/02acbfb445e449cd84125ab9d4407ddc as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/02acbfb445e449cd84125ab9d4407ddc 2024-11-26T10:36:05,793 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/02acbfb445e449cd84125ab9d4407ddc, entries=150, sequenceid=158, filesize=11.9 K 2024-11-26T10:36:05,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/3eeeaadf0573427baf4940f339865a38 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/3eeeaadf0573427baf4940f339865a38 2024-11-26T10:36:05,796 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/3eeeaadf0573427baf4940f339865a38, entries=150, sequenceid=158, filesize=11.9 K 2024-11-26T10:36:05,796 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for d75570331a075e2a47f6b93a7b93d8ef in 1243ms, sequenceid=158, compaction requested=false 2024-11-26T10:36:05,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2538): Flush status journal for d75570331a075e2a47f6b93a7b93d8ef: 2024-11-26T10:36:05,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:36:05,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=160 2024-11-26T10:36:05,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4106): Remote procedure done, pid=160 2024-11-26T10:36:05,798 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=160, resume processing ppid=159 2024-11-26T10:36:05,798 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, ppid=159, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.4610 sec 2024-11-26T10:36:05,799 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees in 2.4640 sec 2024-11-26T10:36:05,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:36:05,962 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d75570331a075e2a47f6b93a7b93d8ef 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-26T10:36:05,962 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d75570331a075e2a47f6b93a7b93d8ef, store=A 2024-11-26T10:36:05,962 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:36:05,962 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d75570331a075e2a47f6b93a7b93d8ef, store=B 2024-11-26T10:36:05,962 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:36:05,962 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
d75570331a075e2a47f6b93a7b93d8ef, store=C 2024-11-26T10:36:05,962 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:36:05,967 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411266f752aa71afd4c34b6c8b1274f7ac5b9_d75570331a075e2a47f6b93a7b93d8ef is 50, key is test_row_0/A:col10/1732617365353/Put/seqid=0 2024-11-26T10:36:05,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742369_1545 (size=14794) 2024-11-26T10:36:06,005 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:06,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34504 deadline: 1732617426002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:06,007 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:06,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34490 deadline: 1732617426004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:06,008 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:06,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34508 deadline: 1732617426005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:06,008 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:06,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34506 deadline: 1732617426006, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:06,108 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:06,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34504 deadline: 1732617426106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:06,109 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:06,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34490 deadline: 1732617426108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:06,110 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:06,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34508 deadline: 1732617426109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:06,110 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:06,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34506 deadline: 1732617426109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:06,312 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:06,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34504 deadline: 1732617426310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:06,312 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:06,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34490 deadline: 1732617426311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:06,314 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:06,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34508 deadline: 1732617426312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:06,314 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:06,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34506 deadline: 1732617426312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:06,370 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:06,373 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411266f752aa71afd4c34b6c8b1274f7ac5b9_d75570331a075e2a47f6b93a7b93d8ef to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411266f752aa71afd4c34b6c8b1274f7ac5b9_d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:36:06,373 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/78dad1e7699e4b18a1e3f33235ba9226, store: [table=TestAcidGuarantees family=A region=d75570331a075e2a47f6b93a7b93d8ef] 2024-11-26T10:36:06,374 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/78dad1e7699e4b18a1e3f33235ba9226 is 175, key is test_row_0/A:col10/1732617365353/Put/seqid=0 2024-11-26T10:36:06,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742370_1546 (size=39749) 2024-11-26T10:36:06,377 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=178, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/78dad1e7699e4b18a1e3f33235ba9226 2024-11-26T10:36:06,382 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/d3bd09d69f5347d181fc5d892ee6715e is 50, key is test_row_0/B:col10/1732617365353/Put/seqid=0 2024-11-26T10:36:06,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742371_1547 
(size=12151) 2024-11-26T10:36:06,615 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:06,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34504 deadline: 1732617426614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:06,616 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:06,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34490 deadline: 1732617426615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:06,616 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:06,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34508 deadline: 1732617426615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:06,618 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:06,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34506 deadline: 1732617426617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:06,785 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=178 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/d3bd09d69f5347d181fc5d892ee6715e 2024-11-26T10:36:06,791 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/1f32a3763af04201b6f51c5e4ea77d6c is 50, key is test_row_0/C:col10/1732617365353/Put/seqid=0 2024-11-26T10:36:06,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742372_1548 (size=12151) 2024-11-26T10:36:07,120 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:07,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34508 deadline: 1732617427120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:07,122 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:07,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34504 deadline: 1732617427120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:07,122 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:07,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34490 deadline: 1732617427120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:07,122 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:07,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34506 deadline: 1732617427122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:07,194 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=178 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/1f32a3763af04201b6f51c5e4ea77d6c 2024-11-26T10:36:07,220 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/78dad1e7699e4b18a1e3f33235ba9226 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/78dad1e7699e4b18a1e3f33235ba9226 2024-11-26T10:36:07,223 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/78dad1e7699e4b18a1e3f33235ba9226, entries=200, sequenceid=178, filesize=38.8 K 2024-11-26T10:36:07,223 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/d3bd09d69f5347d181fc5d892ee6715e as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/d3bd09d69f5347d181fc5d892ee6715e 2024-11-26T10:36:07,226 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/d3bd09d69f5347d181fc5d892ee6715e, entries=150, sequenceid=178, filesize=11.9 K 2024-11-26T10:36:07,226 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/1f32a3763af04201b6f51c5e4ea77d6c as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/1f32a3763af04201b6f51c5e4ea77d6c 2024-11-26T10:36:07,229 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/1f32a3763af04201b6f51c5e4ea77d6c, entries=150, sequenceid=178, filesize=11.9 K 2024-11-26T10:36:07,229 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for d75570331a075e2a47f6b93a7b93d8ef in 1267ms, sequenceid=178, compaction requested=true 2024-11-26T10:36:07,229 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d75570331a075e2a47f6b93a7b93d8ef: 2024-11-26T10:36:07,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d75570331a075e2a47f6b93a7b93d8ef:A, priority=-2147483648, current under compaction store size is 1 2024-11-26T10:36:07,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:36:07,229 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:36:07,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d75570331a075e2a47f6b93a7b93d8ef:B, priority=-2147483648, current under compaction store size is 2 2024-11-26T10:36:07,230 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:36:07,230 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:36:07,230 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d75570331a075e2a47f6b93a7b93d8ef:C, priority=-2147483648, current under compaction store size is 3 2024-11-26T10:36:07,230 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:36:07,230 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36711 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:36:07,230 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102217 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:36:07,230 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): d75570331a075e2a47f6b93a7b93d8ef/B is initiating minor compaction (all files) 2024-11-26T10:36:07,230 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): d75570331a075e2a47f6b93a7b93d8ef/A is initiating minor compaction (all files) 2024-11-26T10:36:07,230 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d75570331a075e2a47f6b93a7b93d8ef/B in TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 
2024-11-26T10:36:07,230 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d75570331a075e2a47f6b93a7b93d8ef/A in TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:36:07,230 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/b2d8a0524edc4c1dbf627fa8873a8ec5, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/02acbfb445e449cd84125ab9d4407ddc, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/d3bd09d69f5347d181fc5d892ee6715e] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp, totalSize=35.9 K 2024-11-26T10:36:07,230 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/e0877ba7e32b4dc5aea5d8bddecf7519, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/c36e341168b749c8ab1026b9c2671d97, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/78dad1e7699e4b18a1e3f33235ba9226] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp, totalSize=99.8 K 2024-11-26T10:36:07,230 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:36:07,230 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 
files: [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/e0877ba7e32b4dc5aea5d8bddecf7519, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/c36e341168b749c8ab1026b9c2671d97, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/78dad1e7699e4b18a1e3f33235ba9226] 2024-11-26T10:36:07,231 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting b2d8a0524edc4c1dbf627fa8873a8ec5, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=138, earliestPutTs=1732617362052 2024-11-26T10:36:07,231 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting e0877ba7e32b4dc5aea5d8bddecf7519, keycount=150, bloomtype=ROW, size=30.6 K, encoding=NONE, compression=NONE, seqNum=138, earliestPutTs=1732617362052 2024-11-26T10:36:07,231 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 02acbfb445e449cd84125ab9d4407ddc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732617363185 2024-11-26T10:36:07,231 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting c36e341168b749c8ab1026b9c2671d97, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732617363185 2024-11-26T10:36:07,231 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting d3bd09d69f5347d181fc5d892ee6715e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=178, earliestPutTs=1732617365353 2024-11-26T10:36:07,231 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 78dad1e7699e4b18a1e3f33235ba9226, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=178, earliestPutTs=1732617365352 2024-11-26T10:36:07,235 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=d75570331a075e2a47f6b93a7b93d8ef] 2024-11-26T10:36:07,236 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d75570331a075e2a47f6b93a7b93d8ef#B#compaction#464 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:36:07,237 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/fc96412b1e044e53be8934336876edff is 50, key is test_row_0/B:col10/1732617365353/Put/seqid=0 2024-11-26T10:36:07,240 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112628e1b98ecc1a4b19a097a712432d4ca6_d75570331a075e2a47f6b93a7b93d8ef store=[table=TestAcidGuarantees family=A region=d75570331a075e2a47f6b93a7b93d8ef] 2024-11-26T10:36:07,242 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112628e1b98ecc1a4b19a097a712432d4ca6_d75570331a075e2a47f6b93a7b93d8ef, store=[table=TestAcidGuarantees family=A region=d75570331a075e2a47f6b93a7b93d8ef] 2024-11-26T10:36:07,242 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112628e1b98ecc1a4b19a097a712432d4ca6_d75570331a075e2a47f6b93a7b93d8ef because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=d75570331a075e2a47f6b93a7b93d8ef] 2024-11-26T10:36:07,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742373_1549 (size=12561) 2024-11-26T10:36:07,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742374_1550 (size=4469) 2024-11-26T10:36:07,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-26T10:36:07,439 INFO [Thread-2254 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 159 completed 2024-11-26T10:36:07,440 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-26T10:36:07,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=161, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees 2024-11-26T10:36:07,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-26T10:36:07,441 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=161, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-26T10:36:07,441 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=161, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-26T10:36:07,441 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=162, ppid=161, state=RUNNABLE; 
org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-26T10:36:07,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-26T10:36:07,592 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:07,592 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-26T10:36:07,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:36:07,592 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2837): Flushing d75570331a075e2a47f6b93a7b93d8ef 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-26T10:36:07,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d75570331a075e2a47f6b93a7b93d8ef, store=A 2024-11-26T10:36:07,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:36:07,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d75570331a075e2a47f6b93a7b93d8ef, store=B 2024-11-26T10:36:07,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:36:07,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d75570331a075e2a47f6b93a7b93d8ef, store=C 2024-11-26T10:36:07,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:36:07,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126b6a6150425e44eed853802f37b28d408_d75570331a075e2a47f6b93a7b93d8ef is 50, key is test_row_0/A:col10/1732617366003/Put/seqid=0 2024-11-26T10:36:07,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742375_1551 (size=12304) 2024-11-26T10:36:07,651 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/fc96412b1e044e53be8934336876edff as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/fc96412b1e044e53be8934336876edff 2024-11-26T10:36:07,654 INFO 
[RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d75570331a075e2a47f6b93a7b93d8ef/B of d75570331a075e2a47f6b93a7b93d8ef into fc96412b1e044e53be8934336876edff(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:36:07,654 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d75570331a075e2a47f6b93a7b93d8ef: 2024-11-26T10:36:07,654 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef., storeName=d75570331a075e2a47f6b93a7b93d8ef/B, priority=13, startTime=1732617367229; duration=0sec 2024-11-26T10:36:07,654 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:36:07,654 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d75570331a075e2a47f6b93a7b93d8ef:B 2024-11-26T10:36:07,654 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:36:07,655 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36711 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:36:07,655 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): d75570331a075e2a47f6b93a7b93d8ef/C is initiating minor compaction (all files) 2024-11-26T10:36:07,655 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d75570331a075e2a47f6b93a7b93d8ef/C in TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 
2024-11-26T10:36:07,655 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/a6ffafb917964ad99ffc380da04e750b, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/3eeeaadf0573427baf4940f339865a38, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/1f32a3763af04201b6f51c5e4ea77d6c] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp, totalSize=35.9 K 2024-11-26T10:36:07,656 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting a6ffafb917964ad99ffc380da04e750b, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=138, earliestPutTs=1732617362052 2024-11-26T10:36:07,656 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 3eeeaadf0573427baf4940f339865a38, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732617363185 2024-11-26T10:36:07,656 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 1f32a3763af04201b6f51c5e4ea77d6c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=178, earliestPutTs=1732617365353 2024-11-26T10:36:07,657 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d75570331a075e2a47f6b93a7b93d8ef#A#compaction#465 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:36:07,657 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/7879169a41f841cd9e312077c58d8b49 is 175, key is test_row_0/A:col10/1732617365353/Put/seqid=0 2024-11-26T10:36:07,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742376_1552 (size=31515) 2024-11-26T10:36:07,662 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d75570331a075e2a47f6b93a7b93d8ef#C#compaction#467 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:36:07,663 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/4008d5bc36b543ef9ed09eff7a610fc9 is 50, key is test_row_0/C:col10/1732617365353/Put/seqid=0 2024-11-26T10:36:07,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742377_1553 (size=12561) 2024-11-26T10:36:07,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-26T10:36:08,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,017 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126b6a6150425e44eed853802f37b28d408_d75570331a075e2a47f6b93a7b93d8ef to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126b6a6150425e44eed853802f37b28d408_d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:36:08,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/baf8c84731704333b2f2bd408e17fcda, store: [table=TestAcidGuarantees family=A region=d75570331a075e2a47f6b93a7b93d8ef] 2024-11-26T10:36:08,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/baf8c84731704333b2f2bd408e17fcda is 175, key is test_row_0/A:col10/1732617366003/Put/seqid=0 2024-11-26T10:36:08,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742378_1554 (size=31105) 2024-11-26T10:36:08,021 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=197, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/baf8c84731704333b2f2bd408e17fcda 2024-11-26T10:36:08,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/1a15742591734d12ad15cab2a5c3adde is 50, key is test_row_0/B:col10/1732617366003/Put/seqid=0 2024-11-26T10:36:08,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742379_1555 (size=12151) 2024-11-26T10:36:08,029 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/1a15742591734d12ad15cab2a5c3adde 2024-11-26T10:36:08,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/95b30e803eb24bf095c886ab73c7716c is 50, key is test_row_0/C:col10/1732617366003/Put/seqid=0 2024-11-26T10:36:08,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742380_1556 (size=12151) 2024-11-26T10:36:08,038 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/95b30e803eb24bf095c886ab73c7716c 2024-11-26T10:36:08,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/baf8c84731704333b2f2bd408e17fcda as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/baf8c84731704333b2f2bd408e17fcda 2024-11-26T10:36:08,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-26T10:36:08,045 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/baf8c84731704333b2f2bd408e17fcda, entries=150, sequenceid=197, filesize=30.4 K 2024-11-26T10:36:08,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/1a15742591734d12ad15cab2a5c3adde as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/1a15742591734d12ad15cab2a5c3adde 2024-11-26T10:36:08,046 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,048 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/1a15742591734d12ad15cab2a5c3adde, entries=150, sequenceid=197, filesize=11.9 K 2024-11-26T10:36:08,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/95b30e803eb24bf095c886ab73c7716c as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/95b30e803eb24bf095c886ab73c7716c 2024-11-26T10:36:08,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,051 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 
{event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/95b30e803eb24bf095c886ab73c7716c, entries=150, sequenceid=197, filesize=11.9 K 2024-11-26T10:36:08,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,052 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=0 B/0 for d75570331a075e2a47f6b93a7b93d8ef in 460ms, sequenceid=197, compaction requested=false 2024-11-26T10:36:08,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2538): Flush status journal for d75570331a075e2a47f6b93a7b93d8ef: 2024-11-26T10:36:08,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 
2024-11-26T10:36:08,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=162 2024-11-26T10:36:08,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4106): Remote procedure done, pid=162 2024-11-26T10:36:08,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,054 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=162, resume processing ppid=161 2024-11-26T10:36:08,054 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, ppid=161, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 612 msec 2024-11-26T10:36:08,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,054 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,055 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees in 614 msec 2024-11-26T10:36:08,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,056 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,058 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,059 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,061 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,063 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,064 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/7879169a41f841cd9e312077c58d8b49 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/7879169a41f841cd9e312077c58d8b49 2024-11-26T10:36:08,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,065 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,066 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,067 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d75570331a075e2a47f6b93a7b93d8ef/A of d75570331a075e2a47f6b93a7b93d8ef into 7879169a41f841cd9e312077c58d8b49(size=30.8 K), total size for store is 61.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:36:08,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,067 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d75570331a075e2a47f6b93a7b93d8ef: 2024-11-26T10:36:08,067 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef., storeName=d75570331a075e2a47f6b93a7b93d8ef/A, priority=13, startTime=1732617367229; duration=0sec 2024-11-26T10:36:08,067 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:36:08,067 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d75570331a075e2a47f6b93a7b93d8ef:A 2024-11-26T10:36:08,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,069 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... repeated DEBUG storefiletracker.StoreFileTrackerFactory(122) "instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" entries from RpcServer.default.FPBQ.Fifo handlers 0-2 (port 45419), 2024-11-26T10:36:08,069 through 10:36:08,074, elided ...]
2024-11-26T10:36:08,074 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/4008d5bc36b543ef9ed09eff7a610fc9 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/4008d5bc36b543ef9ed09eff7a610fc9
[... repeated DEBUG StoreFileTrackerFactory(122) entries from RpcServer.default.FPBQ.Fifo handlers 0-2 (port 45419), 2024-11-26T10:36:08,074 through 10:36:08,077, elided ...]
2024-11-26T10:36:08,078 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d75570331a075e2a47f6b93a7b93d8ef/C of d75570331a075e2a47f6b93a7b93d8ef into 4008d5bc36b543ef9ed09eff7a610fc9(size=12.3 K), total size for store is 24.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-26T10:36:08,078 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d75570331a075e2a47f6b93a7b93d8ef:
2024-11-26T10:36:08,078 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef., storeName=d75570331a075e2a47f6b93a7b93d8ef/C, priority=13, startTime=1732617367230; duration=0sec
2024-11-26T10:36:08,078 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-26T10:36:08,078 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d75570331a075e2a47f6b93a7b93d8ef:C
[... repeated DEBUG StoreFileTrackerFactory(122) entries from RpcServer.default.FPBQ.Fifo handlers 0-2 (port 45419), 2024-11-26T10:36:08,078 through 10:36:08,106, elided ...]
2024-11-26T10:36:08,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:36:08,155 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d75570331a075e2a47f6b93a7b93d8ef 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-26T10:36:08,155 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d75570331a075e2a47f6b93a7b93d8ef, store=A 2024-11-26T10:36:08,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,156 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:36:08,156 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d75570331a075e2a47f6b93a7b93d8ef, store=B 2024-11-26T10:36:08,156 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:36:08,156 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d75570331a075e2a47f6b93a7b93d8ef, store=C 2024-11-26T10:36:08,156 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:36:08,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,162 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126de43b26d507d4e7cb654b0140b6ddc19_d75570331a075e2a47f6b93a7b93d8ef is 50, key is test_row_0/A:col10/1732617368150/Put/seqid=0 2024-11-26T10:36:08,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742381_1557 (size=19774) 2024-11-26T10:36:08,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
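The flush of d75570331a075e2a47f6b93a7b93d8ef requested at 10:36:08,155 has not yet freed memstore space, so the handlers below start rejecting Mutate calls with RegionTooBusyException while the region is over its 512.0 K blocking limit. A minimal client-side sketch of riding out that condition with a bounded retry loop follows; the table name, column family, qualifier, attempt count, and backoff values are illustrative assumptions rather than values taken from this test, and the stock HBase client already retries this exception internally, so the loop is only meant to make the failure mode in the WARN/DEBUG pairs below concrete.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         // Hypothetical table; the test table behind this log is not named here.
         Table table = conn.getTable(TableName.valueOf("test_table"))) {

      // Row/column shaped like the keys visible in the log (test_row_0, family A, col10).
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      long backoffMs = 100L;   // illustrative initial pause, doubled per busy reply
      int maxAttempts = 5;     // illustrative cap
      for (int attempt = 1; attempt <= maxAttempts; attempt++) {
        try {
          table.put(put);      // rejected server-side while the memstore exceeds its blocking limit
          break;
        } catch (IOException ioe) {
          // Depending on client retry settings, RegionTooBusyException may surface
          // directly or wrapped inside another IOException, so check both.
          boolean regionBusy = ioe instanceof RegionTooBusyException
              || ioe.getCause() instanceof RegionTooBusyException;
          if (!regionBusy || attempt == maxAttempts) {
            throw ioe;
          }
          Thread.sleep(backoffMs);
          backoffMs *= 2L;
        }
      }
    }
  }
}

Doubling the pause between attempts keeps the client cheap to run while MemStoreFlusher.0 drains the region; outside of a test like this, the server-side alternative is to revisit hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier, which together set the blocking limit reported in these warnings.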
2024-11-26T10:36:08,182 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:08,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34490 deadline: 1732617428179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:08,184 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:08,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34506 deadline: 1732617428181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:08,184 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:08,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34504 deadline: 1732617428182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:08,184 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:08,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34508 deadline: 1732617428182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:08,285 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:08,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34490 deadline: 1732617428283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:08,286 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:08,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34506 deadline: 1732617428284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:08,286 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:08,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34504 deadline: 1732617428285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:08,287 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:08,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34508 deadline: 1732617428285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:08,487 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:08,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34490 deadline: 1732617428486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:08,488 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:08,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34508 deadline: 1732617428487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:08,489 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:08,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34506 deadline: 1732617428488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:08,489 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:08,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34504 deadline: 1732617428488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:08,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-26T10:36:08,543 INFO [Thread-2254 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 161 completed 2024-11-26T10:36:08,543 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-26T10:36:08,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees 2024-11-26T10:36:08,545 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-26T10:36:08,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-26T10:36:08,545 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-26T10:36:08,545 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-26T10:36:08,568 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:08,570 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126de43b26d507d4e7cb654b0140b6ddc19_d75570331a075e2a47f6b93a7b93d8ef to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126de43b26d507d4e7cb654b0140b6ddc19_d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:36:08,570 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store 
file: hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/fe2bead820744bae9ddd14f370277851, store: [table=TestAcidGuarantees family=A region=d75570331a075e2a47f6b93a7b93d8ef] 2024-11-26T10:36:08,571 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/fe2bead820744bae9ddd14f370277851 is 175, key is test_row_0/A:col10/1732617368150/Put/seqid=0 2024-11-26T10:36:08,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742382_1558 (size=57033) 2024-11-26T10:36:08,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-26T10:36:08,696 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:08,696 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-26T10:36:08,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:36:08,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. as already flushing 2024-11-26T10:36:08,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:36:08,696 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:36:08,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:36:08,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:36:08,791 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:08,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34490 deadline: 1732617428789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:08,791 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:08,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34508 deadline: 1732617428791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:08,792 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:08,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34506 deadline: 1732617428791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:08,793 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:08,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34504 deadline: 1732617428791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:08,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-26T10:36:08,848 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:08,848 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-26T10:36:08,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:36:08,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. as already flushing 2024-11-26T10:36:08,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:36:08,848 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:36:08,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:36:08,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:36:08,974 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=211, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/fe2bead820744bae9ddd14f370277851 2024-11-26T10:36:08,979 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/a533eb02befe4792845f10655a1897e1 is 50, key is test_row_0/B:col10/1732617368150/Put/seqid=0 2024-11-26T10:36:08,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742383_1559 (size=12151) 2024-11-26T10:36:09,000 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:09,000 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-26T10:36:09,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:36:09,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. as already flushing 2024-11-26T10:36:09,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:36:09,001 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:36:09,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:36:09,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:36:09,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-26T10:36:09,152 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:09,152 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-26T10:36:09,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:36:09,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. as already flushing 2024-11-26T10:36:09,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:36:09,153 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:36:09,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:36:09,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:36:09,293 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:09,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34508 deadline: 1732617429292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:09,296 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:09,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34506 deadline: 1732617429294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:09,297 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:09,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34504 deadline: 1732617429295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:09,297 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:09,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34490 deadline: 1732617429296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:09,304 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:09,305 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-26T10:36:09,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:36:09,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. as already flushing 2024-11-26T10:36:09,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:36:09,305 ERROR [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-26T10:36:09,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:36:09,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-26T10:36:09,382 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/a533eb02befe4792845f10655a1897e1 2024-11-26T10:36:09,388 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/d5966f2ec5ec4747904f3fa41d22ea2c is 50, key is test_row_0/C:col10/1732617368150/Put/seqid=0 2024-11-26T10:36:09,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742384_1560 (size=12151) 2024-11-26T10:36:09,391 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/d5966f2ec5ec4747904f3fa41d22ea2c 2024-11-26T10:36:09,394 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/fe2bead820744bae9ddd14f370277851 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/fe2bead820744bae9ddd14f370277851 2024-11-26T10:36:09,396 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/fe2bead820744bae9ddd14f370277851, entries=300, sequenceid=211, filesize=55.7 K 2024-11-26T10:36:09,397 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/a533eb02befe4792845f10655a1897e1 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/a533eb02befe4792845f10655a1897e1 2024-11-26T10:36:09,399 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/a533eb02befe4792845f10655a1897e1, entries=150, sequenceid=211, filesize=11.9 K 2024-11-26T10:36:09,400 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/d5966f2ec5ec4747904f3fa41d22ea2c as 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/d5966f2ec5ec4747904f3fa41d22ea2c 2024-11-26T10:36:09,402 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/d5966f2ec5ec4747904f3fa41d22ea2c, entries=150, sequenceid=211, filesize=11.9 K 2024-11-26T10:36:09,403 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for d75570331a075e2a47f6b93a7b93d8ef in 1248ms, sequenceid=211, compaction requested=true 2024-11-26T10:36:09,403 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d75570331a075e2a47f6b93a7b93d8ef: 2024-11-26T10:36:09,403 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d75570331a075e2a47f6b93a7b93d8ef:A, priority=-2147483648, current under compaction store size is 1 2024-11-26T10:36:09,403 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:36:09,403 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:36:09,403 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d75570331a075e2a47f6b93a7b93d8ef:B, priority=-2147483648, current under compaction store size is 2 2024-11-26T10:36:09,403 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:36:09,403 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:36:09,403 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d75570331a075e2a47f6b93a7b93d8ef:C, priority=-2147483648, current under compaction store size is 3 2024-11-26T10:36:09,403 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:36:09,404 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 119653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:36:09,404 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:36:09,404 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): d75570331a075e2a47f6b93a7b93d8ef/A is initiating minor compaction (all files) 2024-11-26T10:36:09,404 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): d75570331a075e2a47f6b93a7b93d8ef/B is initiating minor compaction (all files) 2024-11-26T10:36:09,404 INFO 
[RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d75570331a075e2a47f6b93a7b93d8ef/A in TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:36:09,404 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d75570331a075e2a47f6b93a7b93d8ef/B in TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:36:09,404 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/fc96412b1e044e53be8934336876edff, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/1a15742591734d12ad15cab2a5c3adde, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/a533eb02befe4792845f10655a1897e1] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp, totalSize=36.0 K 2024-11-26T10:36:09,404 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/7879169a41f841cd9e312077c58d8b49, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/baf8c84731704333b2f2bd408e17fcda, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/fe2bead820744bae9ddd14f370277851] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp, totalSize=116.8 K 2024-11-26T10:36:09,404 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:36:09,404 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 
files: [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/7879169a41f841cd9e312077c58d8b49, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/baf8c84731704333b2f2bd408e17fcda, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/fe2bead820744bae9ddd14f370277851] 2024-11-26T10:36:09,404 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting fc96412b1e044e53be8934336876edff, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=178, earliestPutTs=1732617365353 2024-11-26T10:36:09,404 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7879169a41f841cd9e312077c58d8b49, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=178, earliestPutTs=1732617365353 2024-11-26T10:36:09,405 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 1a15742591734d12ad15cab2a5c3adde, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732617366003 2024-11-26T10:36:09,405 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting baf8c84731704333b2f2bd408e17fcda, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732617366003 2024-11-26T10:36:09,405 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting a533eb02befe4792845f10655a1897e1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732617368148 2024-11-26T10:36:09,405 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting fe2bead820744bae9ddd14f370277851, keycount=300, bloomtype=ROW, size=55.7 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732617368145 2024-11-26T10:36:09,410 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=d75570331a075e2a47f6b93a7b93d8ef] 2024-11-26T10:36:09,411 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d75570331a075e2a47f6b93a7b93d8ef#B#compaction#473 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:36:09,411 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/b2b9319398034a23af8ebaa323bfb7a9 is 50, key is test_row_0/B:col10/1732617368150/Put/seqid=0 2024-11-26T10:36:09,412 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411263df10b86e4644d129f607a573f193825_d75570331a075e2a47f6b93a7b93d8ef store=[table=TestAcidGuarantees family=A region=d75570331a075e2a47f6b93a7b93d8ef] 2024-11-26T10:36:09,414 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411263df10b86e4644d129f607a573f193825_d75570331a075e2a47f6b93a7b93d8ef, store=[table=TestAcidGuarantees family=A region=d75570331a075e2a47f6b93a7b93d8ef] 2024-11-26T10:36:09,415 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411263df10b86e4644d129f607a573f193825_d75570331a075e2a47f6b93a7b93d8ef because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=d75570331a075e2a47f6b93a7b93d8ef] 2024-11-26T10:36:09,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742385_1561 (size=12663) 2024-11-26T10:36:09,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742386_1562 (size=4469) 2024-11-26T10:36:09,456 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:09,457 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-26T10:36:09,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 
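Editor's note: the FlushRegionCallable executions above are the server side of a master-dispatched flush procedure (pid=164); the first attempt was rejected because the region was already flushing, and the redispatched attempt below finally runs. For orientation, a hedged client-side sketch of requesting the same kind of flush through the public Admin API follows; only the table name is taken from the log, everything else is illustrative.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Asks the master to schedule a flush procedure for every region of the table;
      // region servers then execute FlushRegionCallable as seen in the log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}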
2024-11-26T10:36:09,457 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2837): Flushing d75570331a075e2a47f6b93a7b93d8ef 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-26T10:36:09,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d75570331a075e2a47f6b93a7b93d8ef, store=A 2024-11-26T10:36:09,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:36:09,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d75570331a075e2a47f6b93a7b93d8ef, store=B 2024-11-26T10:36:09,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:36:09,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d75570331a075e2a47f6b93a7b93d8ef, store=C 2024-11-26T10:36:09,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:36:09,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411265e7bc568648b4203b5208d6d2d5382d9_d75570331a075e2a47f6b93a7b93d8ef is 50, key is test_row_0/A:col10/1732617368176/Put/seqid=0 2024-11-26T10:36:09,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742387_1563 (size=12304) 2024-11-26T10:36:09,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-26T10:36:09,833 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/b2b9319398034a23af8ebaa323bfb7a9 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/b2b9319398034a23af8ebaa323bfb7a9 2024-11-26T10:36:09,836 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d75570331a075e2a47f6b93a7b93d8ef/B of d75570331a075e2a47f6b93a7b93d8ef into b2b9319398034a23af8ebaa323bfb7a9(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-26T10:36:09,837 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d75570331a075e2a47f6b93a7b93d8ef: 2024-11-26T10:36:09,837 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef., storeName=d75570331a075e2a47f6b93a7b93d8ef/B, priority=13, startTime=1732617369403; duration=0sec 2024-11-26T10:36:09,837 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d75570331a075e2a47f6b93a7b93d8ef#A#compaction#474 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:36:09,837 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:36:09,837 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d75570331a075e2a47f6b93a7b93d8ef:B 2024-11-26T10:36:09,837 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:36:09,837 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/4bed93d5eab84146a3673a5ddcb49a63 is 175, key is test_row_0/A:col10/1732617368150/Put/seqid=0 2024-11-26T10:36:09,838 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:36:09,838 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): d75570331a075e2a47f6b93a7b93d8ef/C is initiating minor compaction (all files) 2024-11-26T10:36:09,838 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d75570331a075e2a47f6b93a7b93d8ef/C in TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 
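Editor's note: the "Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking" lines come from the exploring compaction policy deciding that enough flushed files have accumulated in a store. A hedged sketch of the configuration knobs behind those numbers follows; the values shown are, to my knowledge, the HBase defaults and are not read from this test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionPolicySketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minimum number of eligible store files before a minor compaction is
    // considered (the "3 eligible" in the log).
    conf.setInt("hbase.hstore.compaction.min", 3);
    // Upper bound on how many files one compaction may select.
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Once a store reaches this many files, further flushes are delayed
    // (the "16 blocking" in the log).
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
  }
}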
2024-11-26T10:36:09,838 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/4008d5bc36b543ef9ed09eff7a610fc9, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/95b30e803eb24bf095c886ab73c7716c, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/d5966f2ec5ec4747904f3fa41d22ea2c] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp, totalSize=36.0 K 2024-11-26T10:36:09,839 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 4008d5bc36b543ef9ed09eff7a610fc9, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=178, earliestPutTs=1732617365353 2024-11-26T10:36:09,839 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 95b30e803eb24bf095c886ab73c7716c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732617366003 2024-11-26T10:36:09,839 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting d5966f2ec5ec4747904f3fa41d22ea2c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732617368148 2024-11-26T10:36:09,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742388_1564 (size=31617) 2024-11-26T10:36:09,845 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d75570331a075e2a47f6b93a7b93d8ef#C#compaction#476 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:36:09,845 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/50e68e99b0c6417496f7ad6fd18c510a is 50, key is test_row_0/C:col10/1732617368150/Put/seqid=0 2024-11-26T10:36:09,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:09,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742389_1565 (size=12663) 2024-11-26T10:36:09,869 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411265e7bc568648b4203b5208d6d2d5382d9_d75570331a075e2a47f6b93a7b93d8ef to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411265e7bc568648b4203b5208d6d2d5382d9_d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:36:09,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/f89b8ed7bf584131a5f35f9fa14d3d8e, store: [table=TestAcidGuarantees family=A region=d75570331a075e2a47f6b93a7b93d8ef] 2024-11-26T10:36:09,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/f89b8ed7bf584131a5f35f9fa14d3d8e is 175, key is test_row_0/A:col10/1732617368176/Put/seqid=0 2024-11-26T10:36:09,872 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/50e68e99b0c6417496f7ad6fd18c510a as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/50e68e99b0c6417496f7ad6fd18c510a 2024-11-26T10:36:09,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742390_1566 (size=31105) 2024-11-26T10:36:09,875 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=237, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/f89b8ed7bf584131a5f35f9fa14d3d8e 2024-11-26T10:36:09,876 INFO 
[RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d75570331a075e2a47f6b93a7b93d8ef/C of d75570331a075e2a47f6b93a7b93d8ef into 50e68e99b0c6417496f7ad6fd18c510a(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:36:09,876 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d75570331a075e2a47f6b93a7b93d8ef: 2024-11-26T10:36:09,876 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef., storeName=d75570331a075e2a47f6b93a7b93d8ef/C, priority=13, startTime=1732617369403; duration=0sec 2024-11-26T10:36:09,876 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:36:09,876 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d75570331a075e2a47f6b93a7b93d8ef:C 2024-11-26T10:36:09,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/9720a8cfa8554d7dbeb6ff2d5e897b21 is 50, key is test_row_0/B:col10/1732617368176/Put/seqid=0 2024-11-26T10:36:09,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742391_1567 (size=12151) 2024-11-26T10:36:10,245 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/4bed93d5eab84146a3673a5ddcb49a63 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/4bed93d5eab84146a3673a5ddcb49a63 2024-11-26T10:36:10,248 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d75570331a075e2a47f6b93a7b93d8ef/A of d75570331a075e2a47f6b93a7b93d8ef into 4bed93d5eab84146a3673a5ddcb49a63(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
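Editor's note: at this point each of the three stores (A, B, C) has been compacted from three flushed files into a single file; the log shows these compactions being queued automatically by the flusher. As a point of comparison, here is a hedged sketch of requesting a compaction explicitly through the Admin API; the table name is from the log, and the choice between minor and major compaction is purely illustrative.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactRequestSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      admin.compact(table);       // queue a minor compaction for all regions of the table
      admin.majorCompact(table);  // or force a major compaction of all store files
    }
  }
}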
2024-11-26T10:36:10,248 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d75570331a075e2a47f6b93a7b93d8ef: 2024-11-26T10:36:10,248 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef., storeName=d75570331a075e2a47f6b93a7b93d8ef/A, priority=13, startTime=1732617369403; duration=0sec 2024-11-26T10:36:10,248 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:36:10,248 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d75570331a075e2a47f6b93a7b93d8ef:A 2024-11-26T10:36:10,283 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/9720a8cfa8554d7dbeb6ff2d5e897b21 2024-11-26T10:36:10,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/a5d4183e4bdd40e7a33152491afd69da is 50, key is test_row_0/C:col10/1732617368176/Put/seqid=0 2024-11-26T10:36:10,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742392_1568 (size=12151) 2024-11-26T10:36:10,301 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. as already flushing 2024-11-26T10:36:10,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:36:10,307 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:10,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34506 deadline: 1732617430305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:10,308 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:10,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34504 deadline: 1732617430307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:10,310 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:10,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34508 deadline: 1732617430307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:10,310 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:10,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34490 deadline: 1732617430308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:10,409 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:10,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34506 deadline: 1732617430408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:10,410 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:10,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34504 deadline: 1732617430409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:10,411 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:10,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34508 deadline: 1732617430410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:10,412 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:10,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34490 deadline: 1732617430411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:10,612 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:10,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34506 deadline: 1732617430611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:10,612 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:10,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34504 deadline: 1732617430611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:10,612 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:10,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34508 deadline: 1732617430611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:10,614 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:10,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34490 deadline: 1732617430612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:10,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-26T10:36:10,693 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/a5d4183e4bdd40e7a33152491afd69da 2024-11-26T10:36:10,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/f89b8ed7bf584131a5f35f9fa14d3d8e as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/f89b8ed7bf584131a5f35f9fa14d3d8e 2024-11-26T10:36:10,699 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/f89b8ed7bf584131a5f35f9fa14d3d8e, entries=150, sequenceid=237, filesize=30.4 K 2024-11-26T10:36:10,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/9720a8cfa8554d7dbeb6ff2d5e897b21 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/9720a8cfa8554d7dbeb6ff2d5e897b21 2024-11-26T10:36:10,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,701 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/9720a8cfa8554d7dbeb6ff2d5e897b21, entries=150, sequenceid=237, filesize=11.9 K 2024-11-26T10:36:10,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/a5d4183e4bdd40e7a33152491afd69da as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/a5d4183e4bdd40e7a33152491afd69da 2024-11-26T10:36:10,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-26T10:36:10,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,704 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/a5d4183e4bdd40e7a33152491afd69da, entries=150, sequenceid=237, filesize=11.9 K 2024-11-26T10:36:10,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,705 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for d75570331a075e2a47f6b93a7b93d8ef in 1248ms, sequenceid=237, compaction requested=false 2024-11-26T10:36:10,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2538): Flush status journal for d75570331a075e2a47f6b93a7b93d8ef: 2024-11-26T10:36:10,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 
2024-11-26T10:36:10,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=164 2024-11-26T10:36:10,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4106): Remote procedure done, pid=164 2024-11-26T10:36:10,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,707 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=164, resume processing ppid=163 
2024-11-26T10:36:10,707 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, ppid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1610 sec 2024-11-26T10:36:10,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,708 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees in 2.1630 sec 2024-11-26T10:36:10,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[identical DEBUG entries from storefiletracker.StoreFileTrackerFactory(122) — "instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" — repeat continuously from 2024-11-26T10:36:10,739 through 2024-11-26T10:36:10,793, interleaved across RpcServer.default.FPBQ.Fifo handlers 0, 1, and 2 on port 45419]
2024-11-26T10:36:10,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:10,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:36:10,918 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d75570331a075e2a47f6b93a7b93d8ef 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-26T10:36:10,919 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d75570331a075e2a47f6b93a7b93d8ef, store=A 2024-11-26T10:36:10,919 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:36:10,919 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d75570331a075e2a47f6b93a7b93d8ef, store=B 2024-11-26T10:36:10,919 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:36:10,919 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d75570331a075e2a47f6b93a7b93d8ef, store=C 2024-11-26T10:36:10,919 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:36:10,925 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112642600535279a4bb29c1c5d28803cd202_d75570331a075e2a47f6b93a7b93d8ef is 50, key is test_row_0/A:col10/1732617370306/Put/seqid=0 2024-11-26T10:36:10,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742393_1569 (size=12304) 2024-11-26T10:36:10,943 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:10,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34506 deadline: 1732617430940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:10,943 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:10,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34490 deadline: 1732617430940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:10,946 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:10,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34508 deadline: 1732617430942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:10,946 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:10,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34504 deadline: 1732617430943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:11,046 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:11,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34506 deadline: 1732617431044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:11,047 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:11,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34490 deadline: 1732617431044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:11,047 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:11,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34508 deadline: 1732617431047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:11,050 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:11,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34504 deadline: 1732617431047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:11,249 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:11,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34506 deadline: 1732617431247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:11,250 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:11,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34490 deadline: 1732617431249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:11,250 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:11,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34508 deadline: 1732617431249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:11,253 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:11,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34504 deadline: 1732617431251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:11,332 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:11,335 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112642600535279a4bb29c1c5d28803cd202_d75570331a075e2a47f6b93a7b93d8ef to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112642600535279a4bb29c1c5d28803cd202_d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:36:11,336 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/495b63dabe944d17b30cd304fa65a146, store: [table=TestAcidGuarantees family=A region=d75570331a075e2a47f6b93a7b93d8ef] 2024-11-26T10:36:11,337 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/495b63dabe944d17b30cd304fa65a146 is 175, key is test_row_0/A:col10/1732617370306/Put/seqid=0 2024-11-26T10:36:11,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742394_1570 (size=31105) 2024-11-26T10:36:11,552 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:11,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34506 deadline: 1732617431550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:11,553 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:11,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34490 deadline: 1732617431551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:11,554 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:11,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34508 deadline: 1732617431553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:11,555 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:11,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34504 deadline: 1732617431553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:11,740 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=253, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/495b63dabe944d17b30cd304fa65a146 2024-11-26T10:36:11,745 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/e27945e0b01b4a8d895eb2dd778e97a4 is 50, key is test_row_0/B:col10/1732617370306/Put/seqid=0 2024-11-26T10:36:11,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742395_1571 (size=12151) 2024-11-26T10:36:12,055 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:12,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34506 deadline: 1732617432054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:12,057 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:12,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34490 deadline: 1732617432056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:12,057 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:12,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34508 deadline: 1732617432056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:12,060 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-26T10:36:12,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45419 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34504 deadline: 1732617432058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:12,148 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/e27945e0b01b4a8d895eb2dd778e97a4 2024-11-26T10:36:12,153 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/a3b5d677777647a8bee90f0e32eb2406 is 50, key is test_row_0/C:col10/1732617370306/Put/seqid=0 2024-11-26T10:36:12,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742396_1572 (size=12151) 2024-11-26T10:36:12,560 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/a3b5d677777647a8bee90f0e32eb2406 2024-11-26T10:36:12,586 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/495b63dabe944d17b30cd304fa65a146 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/495b63dabe944d17b30cd304fa65a146 2024-11-26T10:36:12,589 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/495b63dabe944d17b30cd304fa65a146, entries=150, sequenceid=253, filesize=30.4 K 2024-11-26T10:36:12,589 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/e27945e0b01b4a8d895eb2dd778e97a4 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/e27945e0b01b4a8d895eb2dd778e97a4 2024-11-26T10:36:12,592 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/e27945e0b01b4a8d895eb2dd778e97a4, entries=150, sequenceid=253, filesize=11.9 K 2024-11-26T10:36:12,592 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/a3b5d677777647a8bee90f0e32eb2406 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/a3b5d677777647a8bee90f0e32eb2406 2024-11-26T10:36:12,594 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/a3b5d677777647a8bee90f0e32eb2406, entries=150, sequenceid=253, filesize=11.9 K 2024-11-26T10:36:12,595 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for d75570331a075e2a47f6b93a7b93d8ef in 1677ms, sequenceid=253, compaction requested=true 2024-11-26T10:36:12,595 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d75570331a075e2a47f6b93a7b93d8ef: 2024-11-26T10:36:12,595 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d75570331a075e2a47f6b93a7b93d8ef:A, priority=-2147483648, current under compaction store size is 1 2024-11-26T10:36:12,595 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:36:12,595 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:36:12,595 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d75570331a075e2a47f6b93a7b93d8ef:B, priority=-2147483648, current under compaction store size is 2 2024-11-26T10:36:12,595 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:36:12,595 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:36:12,595 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d75570331a075e2a47f6b93a7b93d8ef:C, priority=-2147483648, current under compaction store size is 3 2024-11-26T10:36:12,595 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:36:12,596 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:36:12,596 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] 
compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93827 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:36:12,596 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): d75570331a075e2a47f6b93a7b93d8ef/B is initiating minor compaction (all files) 2024-11-26T10:36:12,596 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1540): d75570331a075e2a47f6b93a7b93d8ef/A is initiating minor compaction (all files) 2024-11-26T10:36:12,596 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d75570331a075e2a47f6b93a7b93d8ef/B in TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:36:12,596 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d75570331a075e2a47f6b93a7b93d8ef/A in TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:36:12,596 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/b2b9319398034a23af8ebaa323bfb7a9, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/9720a8cfa8554d7dbeb6ff2d5e897b21, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/e27945e0b01b4a8d895eb2dd778e97a4] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp, totalSize=36.1 K 2024-11-26T10:36:12,596 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/4bed93d5eab84146a3673a5ddcb49a63, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/f89b8ed7bf584131a5f35f9fa14d3d8e, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/495b63dabe944d17b30cd304fa65a146] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp, totalSize=91.6 K 2024-11-26T10:36:12,596 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:36:12,596 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 
files: [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/4bed93d5eab84146a3673a5ddcb49a63, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/f89b8ed7bf584131a5f35f9fa14d3d8e, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/495b63dabe944d17b30cd304fa65a146] 2024-11-26T10:36:12,596 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting b2b9319398034a23af8ebaa323bfb7a9, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732617368148 2024-11-26T10:36:12,596 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4bed93d5eab84146a3673a5ddcb49a63, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732617368148 2024-11-26T10:36:12,597 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 9720a8cfa8554d7dbeb6ff2d5e897b21, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732617368176 2024-11-26T10:36:12,597 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting f89b8ed7bf584131a5f35f9fa14d3d8e, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732617368176 2024-11-26T10:36:12,597 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting e27945e0b01b4a8d895eb2dd778e97a4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732617370306 2024-11-26T10:36:12,597 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] compactions.Compactor(224): Compacting 495b63dabe944d17b30cd304fa65a146, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732617370306 2024-11-26T10:36:12,602 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d75570331a075e2a47f6b93a7b93d8ef#B#compaction#482 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:36:12,602 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/d10f4ccf934446c2af19bfad646d9090 is 50, key is test_row_0/B:col10/1732617370306/Put/seqid=0 2024-11-26T10:36:12,603 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=d75570331a075e2a47f6b93a7b93d8ef] 2024-11-26T10:36:12,605 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241126d480bd4be4ba43119521f1723a69580f_d75570331a075e2a47f6b93a7b93d8ef store=[table=TestAcidGuarantees family=A region=d75570331a075e2a47f6b93a7b93d8ef] 2024-11-26T10:36:12,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742397_1573 (size=12765) 2024-11-26T10:36:12,607 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241126d480bd4be4ba43119521f1723a69580f_d75570331a075e2a47f6b93a7b93d8ef, store=[table=TestAcidGuarantees family=A region=d75570331a075e2a47f6b93a7b93d8ef] 2024-11-26T10:36:12,607 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126d480bd4be4ba43119521f1723a69580f_d75570331a075e2a47f6b93a7b93d8ef because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=d75570331a075e2a47f6b93a7b93d8ef] 2024-11-26T10:36:12,609 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/d10f4ccf934446c2af19bfad646d9090 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/d10f4ccf934446c2af19bfad646d9090 2024-11-26T10:36:12,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742398_1574 (size=4469) 2024-11-26T10:36:12,611 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d75570331a075e2a47f6b93a7b93d8ef#A#compaction#483 average throughput is 3.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:36:12,612 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/29034792166646ce8b4c085804d68d89 is 175, key is test_row_0/A:col10/1732617370306/Put/seqid=0 2024-11-26T10:36:12,613 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d75570331a075e2a47f6b93a7b93d8ef/B of d75570331a075e2a47f6b93a7b93d8ef into d10f4ccf934446c2af19bfad646d9090(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:36:12,613 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d75570331a075e2a47f6b93a7b93d8ef: 2024-11-26T10:36:12,613 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef., storeName=d75570331a075e2a47f6b93a7b93d8ef/B, priority=13, startTime=1732617372595; duration=0sec 2024-11-26T10:36:12,613 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-26T10:36:12,613 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d75570331a075e2a47f6b93a7b93d8ef:B 2024-11-26T10:36:12,613 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-26T10:36:12,614 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-26T10:36:12,614 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1540): d75570331a075e2a47f6b93a7b93d8ef/C is initiating minor compaction (all files) 2024-11-26T10:36:12,614 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d75570331a075e2a47f6b93a7b93d8ef/C in TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 
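The "Exploring compaction algorithm has selected 3 files of size 36965 ... after considering 1 permutations with 1 in ratio" entries above come from a ratio-based selection over contiguous runs of store files. The following standalone sketch (a hypothetical ExploringSelectionSketch class, greatly simplified relative to HBase's ExploringCompactionPolicy) shows only the core idea: a candidate run qualifies when no single file is larger than `ratio` times the rest of the run, and the cheapest qualifying run wins.

```java
import java.util.ArrayList;
import java.util.List;

// Rough, illustrative sketch of ratio-based "exploring" compaction selection.
// Not HBase's implementation: the real policy also weighs file counts, off-peak
// ratios, blocking thresholds, and files already under compaction.
public class ExploringSelectionSketch {

    static List<Integer> select(long[] fileSizes, int minFiles, int maxFiles, double ratio) {
        List<Integer> best = new ArrayList<>();
        long bestSize = Long.MAX_VALUE;
        int permutations = 0, inRatio = 0;
        for (int start = 0; start < fileSizes.length; start++) {
            for (int end = start + minFiles - 1; end < fileSizes.length && end < start + maxFiles; end++) {
                permutations++;
                long total = 0;
                for (int i = start; i <= end; i++) total += fileSizes[i];
                // "in ratio": no single file dwarfs the rest of the candidate run
                boolean ok = true;
                for (int i = start; i <= end; i++) {
                    if (fileSizes[i] > ratio * (total - fileSizes[i])) { ok = false; break; }
                }
                if (!ok) continue;
                inRatio++;
                // prefer the cheapest qualifying candidate (smallest total rewrite)
                if (total < bestSize) {
                    bestSize = total;
                    best.clear();
                    for (int i = start; i <= end; i++) best.add(i);
                }
            }
        }
        System.out.printf("selected %d files of size %d after considering %d permutations with %d in ratio%n",
            best.size(), bestSize == Long.MAX_VALUE ? 0 : bestSize, permutations, inRatio);
        return best;
    }

    public static void main(String[] args) {
        // three small flush outputs totalling 36965 bytes, as in the B-store selection above
        long[] sizes = {12699, 12151, 12115};
        select(sizes, 3, 10, 1.2);
    }
}
```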
2024-11-26T10:36:12,614 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/50e68e99b0c6417496f7ad6fd18c510a, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/a5d4183e4bdd40e7a33152491afd69da, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/a3b5d677777647a8bee90f0e32eb2406] into tmpdir=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp, totalSize=36.1 K 2024-11-26T10:36:12,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742399_1575 (size=31719) 2024-11-26T10:36:12,615 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting 50e68e99b0c6417496f7ad6fd18c510a, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732617368148 2024-11-26T10:36:12,615 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting a5d4183e4bdd40e7a33152491afd69da, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732617368176 2024-11-26T10:36:12,616 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] compactions.Compactor(224): Compacting a3b5d677777647a8bee90f0e32eb2406, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732617370306 2024-11-26T10:36:12,619 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/29034792166646ce8b4c085804d68d89 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/29034792166646ce8b4c085804d68d89 2024-11-26T10:36:12,623 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d75570331a075e2a47f6b93a7b93d8ef#C#compaction#484 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-26T10:36:12,623 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/bbcb3db5adf44622864f431b1dee2b8d is 50, key is test_row_0/C:col10/1732617370306/Put/seqid=0 2024-11-26T10:36:12,624 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d75570331a075e2a47f6b93a7b93d8ef/A of d75570331a075e2a47f6b93a7b93d8ef into 29034792166646ce8b4c085804d68d89(size=31.0 K), total size for store is 31.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
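The PressureAwareThroughputController entries report each compaction's average write rate against the shared 50.00 MB/second limit and how long the writer slept to stay under it. Below is a minimal sketch of that throttling idea with a hypothetical ThroughputThrottle class; it is not HBase's controller, which additionally adjusts the limit under memstore pressure.

```java
// Simplified, self-contained model of rate-limited writing: after each chunk,
// sleep long enough that the cumulative rate stays under a fixed cap.
public class ThroughputThrottle {
    private final double maxBytesPerSecond;
    private final long startNanos = System.nanoTime();
    private long bytesWritten = 0;
    private long totalSleptMs = 0;

    public ThroughputThrottle(double maxBytesPerSecond) {
        this.maxBytesPerSecond = maxBytesPerSecond;
    }

    /** Record a written chunk and sleep if the cumulative rate exceeds the cap. */
    public void control(long chunkBytes) throws InterruptedException {
        bytesWritten += chunkBytes;
        double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
        double minSeconds = bytesWritten / maxBytesPerSecond; // time the writes should have taken
        long sleepMs = (long) ((minSeconds - elapsedSec) * 1000);
        if (sleepMs > 0) {
            totalSleptMs += sleepMs;
            Thread.sleep(sleepMs);
        }
    }

    public String summary() {
        double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
        double mbPerSec = bytesWritten / elapsedSec / (1024 * 1024);
        return String.format("average throughput is %.2f MB/second, total slept time is %d ms", mbPerSec, totalSleptMs);
    }

    public static void main(String[] args) throws InterruptedException {
        ThroughputThrottle throttle = new ThroughputThrottle(50 * 1024 * 1024); // 50 MB/s cap
        for (int i = 0; i < 100; i++) {
            throttle.control(64 * 1024); // pretend we wrote a 64 KB block of compacted data
        }
        System.out.println(throttle.summary());
    }
}
```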
2024-11-26T10:36:12,624 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d75570331a075e2a47f6b93a7b93d8ef: 2024-11-26T10:36:12,624 INFO [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef., storeName=d75570331a075e2a47f6b93a7b93d8ef/A, priority=13, startTime=1732617372595; duration=0sec 2024-11-26T10:36:12,624 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:36:12,624 DEBUG [RS:0;ccf62758a0a5:45419-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d75570331a075e2a47f6b93a7b93d8ef:A 2024-11-26T10:36:12,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742400_1576 (size=12765) 2024-11-26T10:36:12,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-26T10:36:12,648 INFO [Thread-2254 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 163 completed 2024-11-26T10:36:12,649 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-26T10:36:12,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees 2024-11-26T10:36:12,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-26T10:36:12,650 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-26T10:36:12,651 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-26T10:36:12,651 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-26T10:36:12,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-26T10:36:12,802 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:12,802 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45419 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-26T10:36:12,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:36:12,802 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2837): Flushing d75570331a075e2a47f6b93a7b93d8ef 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-26T10:36:12,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d75570331a075e2a47f6b93a7b93d8ef, store=A 2024-11-26T10:36:12,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:36:12,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d75570331a075e2a47f6b93a7b93d8ef, store=B 2024-11-26T10:36:12,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:36:12,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d75570331a075e2a47f6b93a7b93d8ef, store=C 2024-11-26T10:36:12,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:36:12,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112665e170b6dfd4433abcacf3e38ad66ce7_d75570331a075e2a47f6b93a7b93d8ef is 50, key is test_row_0/A:col10/1732617370942/Put/seqid=0 2024-11-26T10:36:12,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742401_1577 (size=12454) 2024-11-26T10:36:12,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-26T10:36:12,978 DEBUG [Thread-2255 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5d836f78 to 127.0.0.1:61934 2024-11-26T10:36:12,978 DEBUG [Thread-2255 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:36:12,979 DEBUG [Thread-2259 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6bb6288a to 127.0.0.1:61934 2024-11-26T10:36:12,979 DEBUG [Thread-2259 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:36:12,979 DEBUG [Thread-2261 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x06556601 to 127.0.0.1:61934 2024-11-26T10:36:12,979 DEBUG [Thread-2261 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:36:12,980 DEBUG [Thread-2263 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x458a85fd to 127.0.0.1:61934 2024-11-26T10:36:12,980 DEBUG [Thread-2263 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:36:12,980 DEBUG [Thread-2257 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x53305d9b to 127.0.0.1:61934 2024-11-26T10:36:12,980 DEBUG [Thread-2257 {}] ipc.AbstractRpcClient(514): Stopping rpc client 
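The FLUSH procedure above (pid=165, with FlushRegionProcedure subprocedure pid=166) is driven from the client: the test tool asks the master to flush the table and then waits, which is what the repeated "Checking to see if procedure is done pid=165" lines are answering. A hedged client-side equivalent, assuming the standard HBase 2.x Admin API:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Client-side counterpart to the FlushTableProcedure lines above. Admin.flush()
// submits the flush procedure on the master and blocks until it completes.
public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // picks up hbase-site.xml on the classpath
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```

With a small blocking memstore limit like the 512.0 K seen earlier in this log, writes that arrive while such a flush is still pending are rejected with RegionTooBusyException until the flush frees memstore space.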
2024-11-26T10:36:13,034 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/bbcb3db5adf44622864f431b1dee2b8d as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/bbcb3db5adf44622864f431b1dee2b8d 2024-11-26T10:36:13,041 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d75570331a075e2a47f6b93a7b93d8ef/C of d75570331a075e2a47f6b93a7b93d8ef into bbcb3db5adf44622864f431b1dee2b8d(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-26T10:36:13,041 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d75570331a075e2a47f6b93a7b93d8ef: 2024-11-26T10:36:13,041 INFO [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef., storeName=d75570331a075e2a47f6b93a7b93d8ef/C, priority=13, startTime=1732617372595; duration=0sec 2024-11-26T10:36:13,041 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-26T10:36:13,041 DEBUG [RS:0;ccf62758a0a5:45419-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d75570331a075e2a47f6b93a7b93d8ef:C 2024-11-26T10:36:13,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45419 {}] regionserver.HRegion(8581): Flush requested on d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:36:13,060 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 
as already flushing 2024-11-26T10:36:13,060 DEBUG [Thread-2252 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5b914bf4 to 127.0.0.1:61934 2024-11-26T10:36:13,060 DEBUG [Thread-2252 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:36:13,063 DEBUG [Thread-2248 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x17327621 to 127.0.0.1:61934 2024-11-26T10:36:13,063 DEBUG [Thread-2248 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:36:13,066 DEBUG [Thread-2250 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1584f18a to 127.0.0.1:61934 2024-11-26T10:36:13,066 DEBUG [Thread-2250 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:36:13,071 DEBUG [Thread-2244 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0d5efb7a to 127.0.0.1:61934 2024-11-26T10:36:13,071 DEBUG [Thread-2244 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:36:13,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:13,219 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112665e170b6dfd4433abcacf3e38ad66ce7_d75570331a075e2a47f6b93a7b93d8ef to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112665e170b6dfd4433abcacf3e38ad66ce7_d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:36:13,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/a53fec68f42b4694a4b2a203774167bb, store: [table=TestAcidGuarantees family=A region=d75570331a075e2a47f6b93a7b93d8ef] 2024-11-26T10:36:13,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/a53fec68f42b4694a4b2a203774167bb is 175, key is test_row_0/A:col10/1732617370942/Put/seqid=0 2024-11-26T10:36:13,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742402_1578 (size=31255) 2024-11-26T10:36:13,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-26T10:36:13,311 DEBUG [Thread-2246 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7fc332d8 to 127.0.0.1:61934 2024-11-26T10:36:13,311 DEBUG [Thread-2246 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:36:13,627 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=278, memsize=44.7 K, hasBloomFilter=true, 
into tmp file hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/a53fec68f42b4694a4b2a203774167bb 2024-11-26T10:36:13,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/9b3600f6edeb4c14a16d3b9ce394007e is 50, key is test_row_0/B:col10/1732617370942/Put/seqid=0 2024-11-26T10:36:13,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742403_1579 (size=12301) 2024-11-26T10:36:13,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-26T10:36:14,044 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/9b3600f6edeb4c14a16d3b9ce394007e 2024-11-26T10:36:14,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/0f2b9d9c9ae7480daea2d6dd31910a18 is 50, key is test_row_0/C:col10/1732617370942/Put/seqid=0 2024-11-26T10:36:14,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742404_1580 (size=12301) 2024-11-26T10:36:14,465 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/0f2b9d9c9ae7480daea2d6dd31910a18 2024-11-26T10:36:14,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/a53fec68f42b4694a4b2a203774167bb as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/a53fec68f42b4694a4b2a203774167bb 2024-11-26T10:36:14,477 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/a53fec68f42b4694a4b2a203774167bb, entries=150, sequenceid=278, filesize=30.5 K 2024-11-26T10:36:14,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/9b3600f6edeb4c14a16d3b9ce394007e as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/9b3600f6edeb4c14a16d3b9ce394007e 2024-11-26T10:36:14,481 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/9b3600f6edeb4c14a16d3b9ce394007e, entries=150, sequenceid=278, filesize=12.0 K 2024-11-26T10:36:14,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/0f2b9d9c9ae7480daea2d6dd31910a18 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/0f2b9d9c9ae7480daea2d6dd31910a18 2024-11-26T10:36:14,485 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/0f2b9d9c9ae7480daea2d6dd31910a18, entries=150, sequenceid=278, filesize=12.0 K 2024-11-26T10:36:14,485 INFO [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=33.54 KB/34350 for d75570331a075e2a47f6b93a7b93d8ef in 1683ms, sequenceid=278, compaction requested=false 2024-11-26T10:36:14,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2538): Flush status journal for d75570331a075e2a47f6b93a7b93d8ef: 2024-11-26T10:36:14,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 
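The flush itself follows a write-then-commit pattern: each new store file is written under the region's .tmp directory and only then moved into the live column-family directory (the "Committing .tmp/... as ..." followed by "Added ..., entries=150, sequenceid=278" lines above). The sketch below is a simplified local-filesystem model of that pattern, using java.nio rather than HBase's HRegionFileSystem on HDFS; paths and file names are illustrative.

```java
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

// Simplified model of the flush commit pattern: write the new store file under
// .tmp first, then move it into the column-family directory so readers only ever
// see complete files.
public class FlushCommitSketch {
    public static void main(String[] args) throws IOException {
        Path regionDir = Files.createTempDirectory("region-d75570331a");
        Path tmpDir = Files.createDirectories(regionDir.resolve(".tmp").resolve("B"));
        Path familyDir = Files.createDirectories(regionDir.resolve("B"));

        // 1. Flush: write the memstore snapshot into a temporary file.
        Path tmpFile = tmpDir.resolve("e27945e0b01b4a8d895eb2dd778e97a4");
        Files.write(tmpFile, "flushed cells for store B, sequenceid=253".getBytes(StandardCharsets.UTF_8));

        // 2. Commit: atomically move the finished file into the live store directory.
        Path committed = familyDir.resolve(tmpFile.getFileName());
        Files.move(tmpFile, committed, StandardCopyOption.ATOMIC_MOVE);

        System.out.println("Added " + committed + ", filesize=" + Files.size(committed));
    }
}
```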
2024-11-26T10:36:14,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ccf62758a0a5:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=166 2024-11-26T10:36:14,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster(4106): Remote procedure done, pid=166 2024-11-26T10:36:14,487 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=166, resume processing ppid=165 2024-11-26T10:36:14,487 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, ppid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8350 sec 2024-11-26T10:36:14,488 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees in 1.8380 sec 2024-11-26T10:36:14,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-26T10:36:14,757 INFO [Thread-2254 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 165 completed 2024-11-26T10:36:14,757 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-11-26T10:36:14,757 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 51 2024-11-26T10:36:14,758 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 10 2024-11-26T10:36:14,758 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 51 2024-11-26T10:36:14,758 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 48 2024-11-26T10:36:14,758 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 60 2024-11-26T10:36:14,758 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-26T10:36:14,758 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7376 2024-11-26T10:36:14,758 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7215 2024-11-26T10:36:14,758 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7291 2024-11-26T10:36:14,758 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7381 2024-11-26T10:36:14,758 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7240 2024-11-26T10:36:14,758 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-26T10:36:14,758 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-26T10:36:14,758 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6d9954b7 to 127.0.0.1:61934 2024-11-26T10:36:14,758 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:36:14,760 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-26T10:36:14,760 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-26T10:36:14,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=167, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-26T10:36:14,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=167 2024-11-26T10:36:14,766 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732617374765"}]},"ts":"1732617374765"} 2024-11-26T10:36:14,767 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-26T10:36:14,807 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-26T10:36:14,808 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=168, ppid=167, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-26T10:36:14,810 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=169, ppid=168, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d75570331a075e2a47f6b93a7b93d8ef, UNASSIGN}] 2024-11-26T10:36:14,811 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=169, ppid=168, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d75570331a075e2a47f6b93a7b93d8ef, UNASSIGN 2024-11-26T10:36:14,812 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=169 updating hbase:meta row=d75570331a075e2a47f6b93a7b93d8ef, regionState=CLOSING, regionLocation=ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:14,813 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-26T10:36:14,813 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE; CloseRegionProcedure d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877}] 2024-11-26T10:36:14,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-26T10:36:14,966 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:14,966 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] handler.UnassignRegionHandler(124): Close d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:36:14,966 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-26T10:36:14,966 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1681): Closing d75570331a075e2a47f6b93a7b93d8ef, disabling compactions & flushes 2024-11-26T10:36:14,966 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:36:14,966 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 
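Disabling the table unfolds as nested procedures: DisableTableProcedure (pid=167) spawns CloseTableRegionsProcedure (pid=168), which spawns TransitRegionStateProcedure (pid=169), which spawns CloseRegionProcedure (pid=170), and each parent only finishes once its children have (compare "Finished subprocedure pid=166, resume processing ppid=165" earlier). The following is a toy, in-memory model of that parent/child completion order; names and structure are illustrative only, not the real ProcedureV2 framework, which persists state and runs steps asynchronously.

```java
import java.util.List;

// Toy model of nested procedures: a parent is only marked finished after all of
// the subprocedures it spawned have finished.
public class ProcedureSketch {

    interface Procedure {
        String name();
        List<Procedure> execute(); // subprocedures to run before this one completes
    }

    static Procedure leaf(String name) {
        return proc(name, List.of());
    }

    static Procedure proc(String name, List<Procedure> children) {
        return new Procedure() {
            public String name() { return name; }
            public List<Procedure> execute() {
                System.out.println("execute " + name);
                return children;
            }
        };
    }

    /** Depth-first: run the procedure, then its children, then mark it finished. */
    static void run(Procedure p) {
        for (Procedure child : p.execute()) {
            run(child);
        }
        System.out.println("Finished " + p.name());
    }

    public static void main(String[] args) {
        Procedure close = leaf("CloseRegionProcedure d75570331a075e2a47f6b93a7b93d8ef");
        Procedure transit = proc("TransitRegionStateProcedure UNASSIGN", List.of(close));
        Procedure closeAll = proc("CloseTableRegionsProcedure TestAcidGuarantees", List.of(transit));
        Procedure disable = proc("DisableTableProcedure TestAcidGuarantees", List.of(closeAll));
        run(disable); // finishes children first, mirroring the pid=170..167 completion order
    }
}
```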
2024-11-26T10:36:14,966 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. after waiting 0 ms 2024-11-26T10:36:14,966 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 2024-11-26T10:36:14,967 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(2837): Flushing d75570331a075e2a47f6b93a7b93d8ef 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-26T10:36:14,967 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d75570331a075e2a47f6b93a7b93d8ef, store=A 2024-11-26T10:36:14,967 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:36:14,967 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d75570331a075e2a47f6b93a7b93d8ef, store=B 2024-11-26T10:36:14,967 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:36:14,967 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d75570331a075e2a47f6b93a7b93d8ef, store=C 2024-11-26T10:36:14,967 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-26T10:36:14,973 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126c5566c2648154ae6843179f239b4b5b7_d75570331a075e2a47f6b93a7b93d8ef is 50, key is test_row_1/A:col10/1732617373064/Put/seqid=0 2024-11-26T10:36:14,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742405_1581 (size=9914) 2024-11-26T10:36:15,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-26T10:36:15,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-26T10:36:15,378 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-26T10:36:15,388 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241126c5566c2648154ae6843179f239b4b5b7_d75570331a075e2a47f6b93a7b93d8ef to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126c5566c2648154ae6843179f239b4b5b7_d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:36:15,389 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/eaa2353917614a8dacf22f8507f5c3b2, store: [table=TestAcidGuarantees family=A region=d75570331a075e2a47f6b93a7b93d8ef] 2024-11-26T10:36:15,389 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/eaa2353917614a8dacf22f8507f5c3b2 is 175, key is test_row_1/A:col10/1732617373064/Put/seqid=0 2024-11-26T10:36:15,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742406_1582 (size=22561) 2024-11-26T10:36:15,795 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=287, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/eaa2353917614a8dacf22f8507f5c3b2 2024-11-26T10:36:15,808 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/5b9ec4cdf3b247899f03fd727569b17f is 50, key is test_row_1/B:col10/1732617373064/Put/seqid=0 2024-11-26T10:36:15,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742407_1583 (size=9857) 2024-11-26T10:36:15,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-26T10:36:16,213 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/5b9ec4cdf3b247899f03fd727569b17f 2024-11-26T10:36:16,227 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/2f036b2e87e94cbd870c32c009d45eff is 50, key is test_row_1/C:col10/1732617373064/Put/seqid=0 
2024-11-26T10:36:16,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742408_1584 (size=9857) 2024-11-26T10:36:16,631 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/2f036b2e87e94cbd870c32c009d45eff 2024-11-26T10:36:16,635 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/A/eaa2353917614a8dacf22f8507f5c3b2 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/eaa2353917614a8dacf22f8507f5c3b2 2024-11-26T10:36:16,639 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/eaa2353917614a8dacf22f8507f5c3b2, entries=100, sequenceid=287, filesize=22.0 K 2024-11-26T10:36:16,640 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/B/5b9ec4cdf3b247899f03fd727569b17f as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/5b9ec4cdf3b247899f03fd727569b17f 2024-11-26T10:36:16,645 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/5b9ec4cdf3b247899f03fd727569b17f, entries=100, sequenceid=287, filesize=9.6 K 2024-11-26T10:36:16,646 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/.tmp/C/2f036b2e87e94cbd870c32c009d45eff as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/2f036b2e87e94cbd870c32c009d45eff 2024-11-26T10:36:16,651 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/2f036b2e87e94cbd870c32c009d45eff, entries=100, sequenceid=287, filesize=9.6 K 2024-11-26T10:36:16,652 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(3040): 
Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for d75570331a075e2a47f6b93a7b93d8ef in 1686ms, sequenceid=287, compaction requested=true 2024-11-26T10:36:16,652 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/dfa477d503184025a31b8ee03366e902, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/bdb7f4a78d9146e5a7fb62734bc585b8, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/fe88a830c3424271ae75faea8f0bcc05, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/f250bb6da09b4dcbb6128fd08fe559f0, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/2423a91096bb4de7992fbccf69edeb65, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/ae06bd77d73c4002844d58007cfaadd4, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/d7ca4afe7ae646d58665e0602cfd61c8, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/c4bfa07f110045498b974c336fdb7fe8, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/3db3ed434d614145bee6a92a29a21979, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/e0877ba7e32b4dc5aea5d8bddecf7519, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/c36e341168b749c8ab1026b9c2671d97, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/78dad1e7699e4b18a1e3f33235ba9226, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/7879169a41f841cd9e312077c58d8b49, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/baf8c84731704333b2f2bd408e17fcda, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/fe2bead820744bae9ddd14f370277851, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/4bed93d5eab84146a3673a5ddcb49a63, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/f89b8ed7bf584131a5f35f9fa14d3d8e, 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/495b63dabe944d17b30cd304fa65a146] to archive 2024-11-26T10:36:16,654 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-26T10:36:16,656 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/dfa477d503184025a31b8ee03366e902 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/dfa477d503184025a31b8ee03366e902 2024-11-26T10:36:16,657 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/bdb7f4a78d9146e5a7fb62734bc585b8 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/bdb7f4a78d9146e5a7fb62734bc585b8 2024-11-26T10:36:16,659 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/fe88a830c3424271ae75faea8f0bcc05 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/fe88a830c3424271ae75faea8f0bcc05 2024-11-26T10:36:16,660 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/f250bb6da09b4dcbb6128fd08fe559f0 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/f250bb6da09b4dcbb6128fd08fe559f0 2024-11-26T10:36:16,661 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/2423a91096bb4de7992fbccf69edeb65 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/2423a91096bb4de7992fbccf69edeb65 2024-11-26T10:36:16,663 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/ae06bd77d73c4002844d58007cfaadd4 to 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/ae06bd77d73c4002844d58007cfaadd4 2024-11-26T10:36:16,664 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/d7ca4afe7ae646d58665e0602cfd61c8 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/d7ca4afe7ae646d58665e0602cfd61c8 2024-11-26T10:36:16,666 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/c4bfa07f110045498b974c336fdb7fe8 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/c4bfa07f110045498b974c336fdb7fe8 2024-11-26T10:36:16,667 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/3db3ed434d614145bee6a92a29a21979 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/3db3ed434d614145bee6a92a29a21979 2024-11-26T10:36:16,669 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/e0877ba7e32b4dc5aea5d8bddecf7519 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/e0877ba7e32b4dc5aea5d8bddecf7519 2024-11-26T10:36:16,670 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/c36e341168b749c8ab1026b9c2671d97 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/c36e341168b749c8ab1026b9c2671d97 2024-11-26T10:36:16,672 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/78dad1e7699e4b18a1e3f33235ba9226 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/78dad1e7699e4b18a1e3f33235ba9226 2024-11-26T10:36:16,674 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/7879169a41f841cd9e312077c58d8b49 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/7879169a41f841cd9e312077c58d8b49 2024-11-26T10:36:16,675 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/baf8c84731704333b2f2bd408e17fcda to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/baf8c84731704333b2f2bd408e17fcda 2024-11-26T10:36:16,677 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/fe2bead820744bae9ddd14f370277851 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/fe2bead820744bae9ddd14f370277851 2024-11-26T10:36:16,678 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/4bed93d5eab84146a3673a5ddcb49a63 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/4bed93d5eab84146a3673a5ddcb49a63 2024-11-26T10:36:16,680 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/f89b8ed7bf584131a5f35f9fa14d3d8e to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/f89b8ed7bf584131a5f35f9fa14d3d8e 2024-11-26T10:36:16,681 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/495b63dabe944d17b30cd304fa65a146 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/495b63dabe944d17b30cd304fa65a146 2024-11-26T10:36:16,683 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/43a1edd4255e467083ce7f78917a5b8f, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/9302b6982bec4e5e89443c8f6e934f4d, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/1283c3ea48dd436783391d1aa24b0cc5, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/5fd9ff7c3aab4d4ab6fbba0b3e94b75c, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/5bee5d19ab7743699ee824769e0f3cc6, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/d5af0ec9e6bd447682d99e944967be39, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/05a13d1c87064c8686af7304e9028d7b, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/24825155c5ae4205b95b931386fe5502, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/b2d8a0524edc4c1dbf627fa8873a8ec5, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/c0d486e68530410e85170092d9b5a851, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/02acbfb445e449cd84125ab9d4407ddc, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/fc96412b1e044e53be8934336876edff, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/d3bd09d69f5347d181fc5d892ee6715e, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/1a15742591734d12ad15cab2a5c3adde, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/b2b9319398034a23af8ebaa323bfb7a9, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/a533eb02befe4792845f10655a1897e1, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/9720a8cfa8554d7dbeb6ff2d5e897b21, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/e27945e0b01b4a8d895eb2dd778e97a4] to archive 2024-11-26T10:36:16,684 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-26T10:36:16,685 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/43a1edd4255e467083ce7f78917a5b8f to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/43a1edd4255e467083ce7f78917a5b8f 2024-11-26T10:36:16,687 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/9302b6982bec4e5e89443c8f6e934f4d to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/9302b6982bec4e5e89443c8f6e934f4d 2024-11-26T10:36:16,688 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/1283c3ea48dd436783391d1aa24b0cc5 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/1283c3ea48dd436783391d1aa24b0cc5 2024-11-26T10:36:16,690 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/5fd9ff7c3aab4d4ab6fbba0b3e94b75c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/5fd9ff7c3aab4d4ab6fbba0b3e94b75c 2024-11-26T10:36:16,691 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/5bee5d19ab7743699ee824769e0f3cc6 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/5bee5d19ab7743699ee824769e0f3cc6 2024-11-26T10:36:16,693 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/d5af0ec9e6bd447682d99e944967be39 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/d5af0ec9e6bd447682d99e944967be39 2024-11-26T10:36:16,695 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/05a13d1c87064c8686af7304e9028d7b to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/05a13d1c87064c8686af7304e9028d7b 2024-11-26T10:36:16,696 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/24825155c5ae4205b95b931386fe5502 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/24825155c5ae4205b95b931386fe5502 2024-11-26T10:36:16,698 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/b2d8a0524edc4c1dbf627fa8873a8ec5 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/b2d8a0524edc4c1dbf627fa8873a8ec5 2024-11-26T10:36:16,700 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/c0d486e68530410e85170092d9b5a851 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/c0d486e68530410e85170092d9b5a851 2024-11-26T10:36:16,702 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/02acbfb445e449cd84125ab9d4407ddc to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/02acbfb445e449cd84125ab9d4407ddc 2024-11-26T10:36:16,704 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/fc96412b1e044e53be8934336876edff to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/fc96412b1e044e53be8934336876edff 2024-11-26T10:36:16,705 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/d3bd09d69f5347d181fc5d892ee6715e to 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/d3bd09d69f5347d181fc5d892ee6715e 2024-11-26T10:36:16,707 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/1a15742591734d12ad15cab2a5c3adde to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/1a15742591734d12ad15cab2a5c3adde 2024-11-26T10:36:16,709 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/b2b9319398034a23af8ebaa323bfb7a9 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/b2b9319398034a23af8ebaa323bfb7a9 2024-11-26T10:36:16,711 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/a533eb02befe4792845f10655a1897e1 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/a533eb02befe4792845f10655a1897e1 2024-11-26T10:36:16,712 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/9720a8cfa8554d7dbeb6ff2d5e897b21 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/9720a8cfa8554d7dbeb6ff2d5e897b21 2024-11-26T10:36:16,714 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/e27945e0b01b4a8d895eb2dd778e97a4 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/e27945e0b01b4a8d895eb2dd778e97a4 2024-11-26T10:36:16,715 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/0a84e899b3ec44bca146059214289653, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/18e7fe6407cc4101a1b0de2426535924, 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/b7136858b3ba4008ba59a04c2495a1ca, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/8644a96608fd4352813611212faabae4, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/a330525e89824e0a956e041acbf2c058, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/84a4a975b3ff475f857b2c159184b2ff, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/a196fcfc64f94d2a8843070dddc32270, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/f0d912d26fef4f5494ad26c4e88f3080, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/a6ffafb917964ad99ffc380da04e750b, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/934382791ad54ef39561eece2c2dbbe3, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/3eeeaadf0573427baf4940f339865a38, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/4008d5bc36b543ef9ed09eff7a610fc9, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/1f32a3763af04201b6f51c5e4ea77d6c, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/95b30e803eb24bf095c886ab73c7716c, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/50e68e99b0c6417496f7ad6fd18c510a, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/d5966f2ec5ec4747904f3fa41d22ea2c, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/a5d4183e4bdd40e7a33152491afd69da, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/a3b5d677777647a8bee90f0e32eb2406] to archive 2024-11-26T10:36:16,716 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-26T10:36:16,718 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/0a84e899b3ec44bca146059214289653 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/0a84e899b3ec44bca146059214289653 2024-11-26T10:36:16,723 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/18e7fe6407cc4101a1b0de2426535924 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/18e7fe6407cc4101a1b0de2426535924 2024-11-26T10:36:16,724 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/b7136858b3ba4008ba59a04c2495a1ca to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/b7136858b3ba4008ba59a04c2495a1ca 2024-11-26T10:36:16,726 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/8644a96608fd4352813611212faabae4 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/8644a96608fd4352813611212faabae4 2024-11-26T10:36:16,727 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/a330525e89824e0a956e041acbf2c058 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/a330525e89824e0a956e041acbf2c058 2024-11-26T10:36:16,728 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/84a4a975b3ff475f857b2c159184b2ff to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/84a4a975b3ff475f857b2c159184b2ff 2024-11-26T10:36:16,728 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/a196fcfc64f94d2a8843070dddc32270 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/a196fcfc64f94d2a8843070dddc32270 2024-11-26T10:36:16,729 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/f0d912d26fef4f5494ad26c4e88f3080 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/f0d912d26fef4f5494ad26c4e88f3080 2024-11-26T10:36:16,730 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/a6ffafb917964ad99ffc380da04e750b to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/a6ffafb917964ad99ffc380da04e750b 2024-11-26T10:36:16,731 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/934382791ad54ef39561eece2c2dbbe3 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/934382791ad54ef39561eece2c2dbbe3 2024-11-26T10:36:16,732 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/3eeeaadf0573427baf4940f339865a38 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/3eeeaadf0573427baf4940f339865a38 2024-11-26T10:36:16,732 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/4008d5bc36b543ef9ed09eff7a610fc9 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/4008d5bc36b543ef9ed09eff7a610fc9 2024-11-26T10:36:16,733 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/1f32a3763af04201b6f51c5e4ea77d6c to 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/1f32a3763af04201b6f51c5e4ea77d6c 2024-11-26T10:36:16,734 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/95b30e803eb24bf095c886ab73c7716c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/95b30e803eb24bf095c886ab73c7716c 2024-11-26T10:36:16,734 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/50e68e99b0c6417496f7ad6fd18c510a to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/50e68e99b0c6417496f7ad6fd18c510a 2024-11-26T10:36:16,735 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/d5966f2ec5ec4747904f3fa41d22ea2c to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/d5966f2ec5ec4747904f3fa41d22ea2c 2024-11-26T10:36:16,736 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/a5d4183e4bdd40e7a33152491afd69da to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/a5d4183e4bdd40e7a33152491afd69da 2024-11-26T10:36:16,737 DEBUG [StoreCloser-TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/a3b5d677777647a8bee90f0e32eb2406 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/a3b5d677777647a8bee90f0e32eb2406 2024-11-26T10:36:16,740 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/recovered.edits/290.seqid, newMaxSeqId=290, maxSeqId=4 2024-11-26T10:36:16,741 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef. 
2024-11-26T10:36:16,741 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1635): Region close journal for d75570331a075e2a47f6b93a7b93d8ef: 2024-11-26T10:36:16,742 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] handler.UnassignRegionHandler(170): Closed d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:36:16,742 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=169 updating hbase:meta row=d75570331a075e2a47f6b93a7b93d8ef, regionState=CLOSED 2024-11-26T10:36:16,744 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=170, resume processing ppid=169 2024-11-26T10:36:16,744 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=170, ppid=169, state=SUCCESS; CloseRegionProcedure d75570331a075e2a47f6b93a7b93d8ef, server=ccf62758a0a5,45419,1732617185877 in 1.9300 sec 2024-11-26T10:36:16,746 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=169, resume processing ppid=168 2024-11-26T10:36:16,746 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=169, ppid=168, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=d75570331a075e2a47f6b93a7b93d8ef, UNASSIGN in 1.9340 sec 2024-11-26T10:36:16,747 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=168, resume processing ppid=167 2024-11-26T10:36:16,747 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=168, ppid=167, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.9380 sec 2024-11-26T10:36:16,748 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732617376748"}]},"ts":"1732617376748"} 2024-11-26T10:36:16,749 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-26T10:36:16,782 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-26T10:36:16,785 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 2.0220 sec 2024-11-26T10:36:16,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-26T10:36:16,875 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 167 completed 2024-11-26T10:36:16,876 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-26T10:36:16,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] procedure2.ProcedureExecutor(1098): Stored pid=171, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-26T10:36:16,879 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=171, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-26T10:36:16,881 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=171, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-26T10:36:16,881 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-26T10:36:16,885 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:36:16,888 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A, FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B, FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C, FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/recovered.edits] 2024-11-26T10:36:16,891 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/29034792166646ce8b4c085804d68d89 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/29034792166646ce8b4c085804d68d89 2024-11-26T10:36:16,892 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/a53fec68f42b4694a4b2a203774167bb to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/a53fec68f42b4694a4b2a203774167bb 2024-11-26T10:36:16,893 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/eaa2353917614a8dacf22f8507f5c3b2 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/A/eaa2353917614a8dacf22f8507f5c3b2 2024-11-26T10:36:16,895 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/5b9ec4cdf3b247899f03fd727569b17f to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/5b9ec4cdf3b247899f03fd727569b17f 2024-11-26T10:36:16,896 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/9b3600f6edeb4c14a16d3b9ce394007e to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/9b3600f6edeb4c14a16d3b9ce394007e 
2024-11-26T10:36:16,897 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/d10f4ccf934446c2af19bfad646d9090 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/B/d10f4ccf934446c2af19bfad646d9090 2024-11-26T10:36:16,900 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/0f2b9d9c9ae7480daea2d6dd31910a18 to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/0f2b9d9c9ae7480daea2d6dd31910a18 2024-11-26T10:36:16,901 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/2f036b2e87e94cbd870c32c009d45eff to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/2f036b2e87e94cbd870c32c009d45eff 2024-11-26T10:36:16,903 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/bbcb3db5adf44622864f431b1dee2b8d to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/C/bbcb3db5adf44622864f431b1dee2b8d 2024-11-26T10:36:16,905 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/recovered.edits/290.seqid to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef/recovered.edits/290.seqid 2024-11-26T10:36:16,906 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/default/TestAcidGuarantees/d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:36:16,906 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-26T10:36:16,906 DEBUG [PEWorker-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-26T10:36:16,907 DEBUG [PEWorker-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-26T10:36:16,910 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411262e8a08f4427d48a88b6563e9f044eacf_d75570331a075e2a47f6b93a7b93d8ef to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411262e8a08f4427d48a88b6563e9f044eacf_d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:36:16,911 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112642600535279a4bb29c1c5d28803cd202_d75570331a075e2a47f6b93a7b93d8ef to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112642600535279a4bb29c1c5d28803cd202_d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:36:16,912 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112643a6e6ab8da84d5cb7601d31276cca95_d75570331a075e2a47f6b93a7b93d8ef to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112643a6e6ab8da84d5cb7601d31276cca95_d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:36:16,913 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411265e7bc568648b4203b5208d6d2d5382d9_d75570331a075e2a47f6b93a7b93d8ef to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411265e7bc568648b4203b5208d6d2d5382d9_d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:36:16,913 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411266175816aa9ea4b139c54fc29ad855680_d75570331a075e2a47f6b93a7b93d8ef to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411266175816aa9ea4b139c54fc29ad855680_d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:36:16,914 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112665e170b6dfd4433abcacf3e38ad66ce7_d75570331a075e2a47f6b93a7b93d8ef to 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112665e170b6dfd4433abcacf3e38ad66ce7_d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:36:16,915 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411266f752aa71afd4c34b6c8b1274f7ac5b9_d75570331a075e2a47f6b93a7b93d8ef to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411266f752aa71afd4c34b6c8b1274f7ac5b9_d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:36:16,916 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126764168e6184e493dbca07e7b21eec5f7_d75570331a075e2a47f6b93a7b93d8ef to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126764168e6184e493dbca07e7b21eec5f7_d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:36:16,917 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411267b2cbc915a244bdaa9ea923eed02daa7_d75570331a075e2a47f6b93a7b93d8ef to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411267b2cbc915a244bdaa9ea923eed02daa7_d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:36:16,918 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126801b1f6d967642b6af925d6e0a324a22_d75570331a075e2a47f6b93a7b93d8ef to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126801b1f6d967642b6af925d6e0a324a22_d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:36:16,918 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112687c849b47eb3448aa75bda41b4c35418_d75570331a075e2a47f6b93a7b93d8ef to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112687c849b47eb3448aa75bda41b4c35418_d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:36:16,919 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126a28b82377f724a1b95f6f11a6e1b18d9_d75570331a075e2a47f6b93a7b93d8ef to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126a28b82377f724a1b95f6f11a6e1b18d9_d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:36:16,920 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126b6a6150425e44eed853802f37b28d408_d75570331a075e2a47f6b93a7b93d8ef to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126b6a6150425e44eed853802f37b28d408_d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:36:16,921 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126c5566c2648154ae6843179f239b4b5b7_d75570331a075e2a47f6b93a7b93d8ef to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126c5566c2648154ae6843179f239b4b5b7_d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:36:16,922 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126de43b26d507d4e7cb654b0140b6ddc19_d75570331a075e2a47f6b93a7b93d8ef to hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241126de43b26d507d4e7cb654b0140b6ddc19_d75570331a075e2a47f6b93a7b93d8ef 2024-11-26T10:36:16,922 DEBUG [PEWorker-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-26T10:36:16,924 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=171, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-26T10:36:16,927 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-26T10:36:16,929 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-26T10:36:16,930 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=171, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-26T10:36:16,930 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 
2024-11-26T10:36:16,930 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732617376930"}]},"ts":"9223372036854775807"} 2024-11-26T10:36:16,932 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-26T10:36:16,932 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => d75570331a075e2a47f6b93a7b93d8ef, NAME => 'TestAcidGuarantees,,1732617349541.d75570331a075e2a47f6b93a7b93d8ef.', STARTKEY => '', ENDKEY => ''}] 2024-11-26T10:36:16,932 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-26T10:36:16,932 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732617376932"}]},"ts":"9223372036854775807"} 2024-11-26T10:36:16,934 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-26T10:36:16,943 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=171, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-26T10:36:16,944 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=171, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 67 msec 2024-11-26T10:36:16,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41385 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-26T10:36:16,982 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 171 completed 2024-11-26T10:36:16,995 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=241 (was 239) - Thread LEAK? -, OpenFileDescriptor=457 (was 450) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=323 (was 341), ProcessCount=11 (was 11), AvailableMemoryMB=5219 (was 5246) 2024-11-26T10:36:16,995 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-11-26T10:36:16,995 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-26T10:36:16,995 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7e541e88 to 127.0.0.1:61934 2024-11-26T10:36:16,995 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:36:16,995 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-26T10:36:16,995 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=601527720, stopped=false 2024-11-26T10:36:16,996 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=ccf62758a0a5,41385,1732617185123 2024-11-26T10:36:17,007 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41385-0x10177fdc7010000, quorum=127.0.0.1:61934, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-26T10:36:17,007 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45419-0x10177fdc7010001, quorum=127.0.0.1:61934, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-26T10:36:17,007 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41385-0x10177fdc7010000, quorum=127.0.0.1:61934, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:36:17,007 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-11-26T10:36:17,007 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45419-0x10177fdc7010001, quorum=127.0.0.1:61934, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:36:17,008 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45419-0x10177fdc7010001, quorum=127.0.0.1:61934, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-26T10:36:17,008 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41385-0x10177fdc7010000, quorum=127.0.0.1:61934, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-26T10:36:17,008 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:36:17,009 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'ccf62758a0a5,45419,1732617185877' ***** 2024-11-26T10:36:17,009 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-11-26T10:36:17,009 INFO [RS:0;ccf62758a0a5:45419 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-26T10:36:17,009 INFO [RS:0;ccf62758a0a5:45419 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-26T10:36:17,009 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-11-26T10:36:17,009 INFO [RS:0;ccf62758a0a5:45419 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-26T10:36:17,009 INFO [RS:0;ccf62758a0a5:45419 {}] regionserver.HRegionServer(3579): Received CLOSE for b7271e3c105b406e8a3f3f956110c7a1 2024-11-26T10:36:17,010 INFO [RS:0;ccf62758a0a5:45419 {}] regionserver.HRegionServer(1224): stopping server ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:17,010 DEBUG [RS:0;ccf62758a0a5:45419 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:36:17,010 INFO [RS:0;ccf62758a0a5:45419 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-26T10:36:17,010 INFO [RS:0;ccf62758a0a5:45419 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-26T10:36:17,010 INFO [RS:0;ccf62758a0a5:45419 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-26T10:36:17,010 INFO [RS:0;ccf62758a0a5:45419 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-11-26T10:36:17,010 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing b7271e3c105b406e8a3f3f956110c7a1, disabling compactions & flushes 2024-11-26T10:36:17,011 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1732617190145.b7271e3c105b406e8a3f3f956110c7a1. 2024-11-26T10:36:17,011 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1732617190145.b7271e3c105b406e8a3f3f956110c7a1. 2024-11-26T10:36:17,011 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1732617190145.b7271e3c105b406e8a3f3f956110c7a1. after waiting 0 ms 2024-11-26T10:36:17,011 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1732617190145.b7271e3c105b406e8a3f3f956110c7a1. 
2024-11-26T10:36:17,011 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing b7271e3c105b406e8a3f3f956110c7a1 1/1 column families, dataSize=78 B heapSize=488 B 2024-11-26T10:36:17,011 INFO [RS:0;ccf62758a0a5:45419 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-11-26T10:36:17,011 DEBUG [RS:0;ccf62758a0a5:45419 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740, b7271e3c105b406e8a3f3f956110c7a1=hbase:namespace,,1732617190145.b7271e3c105b406e8a3f3f956110c7a1.} 2024-11-26T10:36:17,011 DEBUG [RS_CLOSE_META-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-26T10:36:17,011 INFO [RS_CLOSE_META-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-26T10:36:17,011 DEBUG [RS_CLOSE_META-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-26T10:36:17,011 DEBUG [RS_CLOSE_META-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-26T10:36:17,011 DEBUG [RS_CLOSE_META-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-26T10:36:17,011 INFO [RS_CLOSE_META-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=20.55 KB heapSize=35.87 KB 2024-11-26T10:36:17,011 DEBUG [RS:0;ccf62758a0a5:45419 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, b7271e3c105b406e8a3f3f956110c7a1 2024-11-26T10:36:17,012 INFO [regionserver/ccf62758a0a5:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-26T10:36:17,027 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/hbase/namespace/b7271e3c105b406e8a3f3f956110c7a1/.tmp/info/757c9bba5b7947bcb2c5a27b51c909c9 is 45, key is default/info:d/1732617191602/Put/seqid=0 2024-11-26T10:36:17,032 DEBUG [RS_CLOSE_META-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/hbase/meta/1588230740/.tmp/info/98dc318322fa4bd4b7b6a41144ced194 is 143, key is hbase:namespace,,1732617190145.b7271e3c105b406e8a3f3f956110c7a1./info:regioninfo/1732617191449/Put/seqid=0 2024-11-26T10:36:17,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742409_1585 (size=5037) 2024-11-26T10:36:17,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742410_1586 (size=7725) 2024-11-26T10:36:17,212 DEBUG [RS:0;ccf62758a0a5:45419 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, b7271e3c105b406e8a3f3f956110c7a1 2024-11-26T10:36:17,412 DEBUG [RS:0;ccf62758a0a5:45419 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, b7271e3c105b406e8a3f3f956110c7a1 2024-11-26T10:36:17,434 INFO 
[RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/hbase/namespace/b7271e3c105b406e8a3f3f956110c7a1/.tmp/info/757c9bba5b7947bcb2c5a27b51c909c9 2024-11-26T10:36:17,436 INFO [RS_CLOSE_META-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/hbase/meta/1588230740/.tmp/info/98dc318322fa4bd4b7b6a41144ced194 2024-11-26T10:36:17,444 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/hbase/namespace/b7271e3c105b406e8a3f3f956110c7a1/.tmp/info/757c9bba5b7947bcb2c5a27b51c909c9 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/hbase/namespace/b7271e3c105b406e8a3f3f956110c7a1/info/757c9bba5b7947bcb2c5a27b51c909c9 2024-11-26T10:36:17,447 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/hbase/namespace/b7271e3c105b406e8a3f3f956110c7a1/info/757c9bba5b7947bcb2c5a27b51c909c9, entries=2, sequenceid=6, filesize=4.9 K 2024-11-26T10:36:17,448 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for b7271e3c105b406e8a3f3f956110c7a1 in 437ms, sequenceid=6, compaction requested=false 2024-11-26T10:36:17,450 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/hbase/namespace/b7271e3c105b406e8a3f3f956110c7a1/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-26T10:36:17,451 INFO [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1732617190145.b7271e3c105b406e8a3f3f956110c7a1. 2024-11-26T10:36:17,451 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for b7271e3c105b406e8a3f3f956110c7a1: 2024-11-26T10:36:17,451 DEBUG [RS_CLOSE_REGION-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1732617190145.b7271e3c105b406e8a3f3f956110c7a1. 
2024-11-26T10:36:17,459 DEBUG [RS_CLOSE_META-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/hbase/meta/1588230740/.tmp/rep_barrier/1d8f42c3c2554cd1a9790bfba37b447c is 102, key is TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad./rep_barrier:/1732617226965/DeleteFamily/seqid=0 2024-11-26T10:36:17,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742411_1587 (size=6025) 2024-11-26T10:36:17,613 DEBUG [RS:0;ccf62758a0a5:45419 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-26T10:36:17,715 INFO [regionserver/ccf62758a0a5:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-26T10:36:17,715 INFO [regionserver/ccf62758a0a5:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-26T10:36:17,813 DEBUG [RS:0;ccf62758a0a5:45419 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-26T10:36:17,864 INFO [RS_CLOSE_META-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=588 B at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/hbase/meta/1588230740/.tmp/rep_barrier/1d8f42c3c2554cd1a9790bfba37b447c 2024-11-26T10:36:17,889 DEBUG [RS_CLOSE_META-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/hbase/meta/1588230740/.tmp/table/b8b4d001f53941ca8a005e8af2961226 is 96, key is TestAcidGuarantees,,1732617191827.6a931c8e80842c8947954ecd8357e9ad./table:/1732617226965/DeleteFamily/seqid=0 2024-11-26T10:36:17,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742412_1588 (size=5942) 2024-11-26T10:36:18,013 INFO [RS:0;ccf62758a0a5:45419 {}] regionserver.HRegionServer(1599): Waiting on 1 regions to close 2024-11-26T10:36:18,013 DEBUG [RS:0;ccf62758a0a5:45419 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-26T10:36:18,014 DEBUG [RS:0;ccf62758a0a5:45419 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-26T10:36:18,214 DEBUG [RS:0;ccf62758a0a5:45419 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-26T10:36:18,294 INFO [RS_CLOSE_META-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.08 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/hbase/meta/1588230740/.tmp/table/b8b4d001f53941ca8a005e8af2961226 2024-11-26T10:36:18,303 DEBUG [RS_CLOSE_META-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/hbase/meta/1588230740/.tmp/info/98dc318322fa4bd4b7b6a41144ced194 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/hbase/meta/1588230740/info/98dc318322fa4bd4b7b6a41144ced194 2024-11-26T10:36:18,310 INFO [RS_CLOSE_META-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/hbase/meta/1588230740/info/98dc318322fa4bd4b7b6a41144ced194, entries=22, sequenceid=93, filesize=7.5 K 2024-11-26T10:36:18,311 DEBUG [RS_CLOSE_META-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/hbase/meta/1588230740/.tmp/rep_barrier/1d8f42c3c2554cd1a9790bfba37b447c as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/hbase/meta/1588230740/rep_barrier/1d8f42c3c2554cd1a9790bfba37b447c 2024-11-26T10:36:18,314 INFO [RS_CLOSE_META-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/hbase/meta/1588230740/rep_barrier/1d8f42c3c2554cd1a9790bfba37b447c, entries=6, sequenceid=93, filesize=5.9 K 2024-11-26T10:36:18,315 DEBUG [RS_CLOSE_META-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/hbase/meta/1588230740/.tmp/table/b8b4d001f53941ca8a005e8af2961226 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/hbase/meta/1588230740/table/b8b4d001f53941ca8a005e8af2961226 2024-11-26T10:36:18,318 INFO [RS_CLOSE_META-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/hbase/meta/1588230740/table/b8b4d001f53941ca8a005e8af2961226, entries=9, sequenceid=93, filesize=5.8 K 2024-11-26T10:36:18,319 INFO [RS_CLOSE_META-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~20.55 KB/21040, heapSize ~35.82 KB/36680, currentSize=0 B/0 for 1588230740 in 1308ms, sequenceid=93, compaction requested=false 2024-11-26T10:36:18,323 DEBUG [RS_CLOSE_META-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/data/hbase/meta/1588230740/recovered.edits/96.seqid, newMaxSeqId=96, maxSeqId=1 2024-11-26T10:36:18,324 DEBUG [RS_CLOSE_META-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-26T10:36:18,324 INFO [RS_CLOSE_META-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-11-26T10:36:18,324 DEBUG [RS_CLOSE_META-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-11-26T10:36:18,324 DEBUG [RS_CLOSE_META-regionserver/ccf62758a0a5:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-26T10:36:18,414 INFO [RS:0;ccf62758a0a5:45419 {}] regionserver.HRegionServer(1250): stopping server ccf62758a0a5,45419,1732617185877; all regions closed. 
2024-11-26T10:36:18,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741834_1010 (size=26050) 2024-11-26T10:36:18,428 DEBUG [RS:0;ccf62758a0a5:45419 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/oldWALs 2024-11-26T10:36:18,428 INFO [RS:0;ccf62758a0a5:45419 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL ccf62758a0a5%2C45419%2C1732617185877.meta:.meta(num 1732617189865) 2024-11-26T10:36:18,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741832_1008 (size=13235500) 2024-11-26T10:36:18,432 DEBUG [RS:0;ccf62758a0a5:45419 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/oldWALs 2024-11-26T10:36:18,432 INFO [RS:0;ccf62758a0a5:45419 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL ccf62758a0a5%2C45419%2C1732617185877:(num 1732617188881) 2024-11-26T10:36:18,432 DEBUG [RS:0;ccf62758a0a5:45419 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:36:18,432 INFO [RS:0;ccf62758a0a5:45419 {}] regionserver.LeaseManager(133): Closed leases 2024-11-26T10:36:18,433 INFO [RS:0;ccf62758a0a5:45419 {}] hbase.ChoreService(370): Chore service for: regionserver/ccf62758a0a5:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown 2024-11-26T10:36:18,433 INFO [regionserver/ccf62758a0a5:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-11-26T10:36:18,434 INFO [RS:0;ccf62758a0a5:45419 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:45419 2024-11-26T10:36:18,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45419-0x10177fdc7010001, quorum=127.0.0.1:61934, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/ccf62758a0a5,45419,1732617185877 2024-11-26T10:36:18,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41385-0x10177fdc7010000, quorum=127.0.0.1:61934, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-26T10:36:18,459 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [ccf62758a0a5,45419,1732617185877] 2024-11-26T10:36:18,460 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing ccf62758a0a5,45419,1732617185877; numProcessing=1 2024-11-26T10:36:18,468 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/ccf62758a0a5,45419,1732617185877 already deleted, retry=false 2024-11-26T10:36:18,468 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; ccf62758a0a5,45419,1732617185877 expired; onlineServers=0 2024-11-26T10:36:18,468 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'ccf62758a0a5,41385,1732617185123' ***** 2024-11-26T10:36:18,468 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-26T10:36:18,468 DEBUG [M:0;ccf62758a0a5:41385 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ec5a204, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ccf62758a0a5/172.17.0.2:0 2024-11-26T10:36:18,469 INFO [M:0;ccf62758a0a5:41385 {}] regionserver.HRegionServer(1224): stopping server ccf62758a0a5,41385,1732617185123 2024-11-26T10:36:18,469 INFO [M:0;ccf62758a0a5:41385 {}] regionserver.HRegionServer(1250): stopping server ccf62758a0a5,41385,1732617185123; all regions closed. 2024-11-26T10:36:18,469 DEBUG [M:0;ccf62758a0a5:41385 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-26T10:36:18,469 DEBUG [M:0;ccf62758a0a5:41385 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-26T10:36:18,469 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-26T10:36:18,469 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster-HFileCleaner.large.0-1732617188571 {}] cleaner.HFileCleaner(306): Exit Thread[master/ccf62758a0a5:0:becomeActiveMaster-HFileCleaner.large.0-1732617188571,5,FailOnTimeoutGroup] 2024-11-26T10:36:18,469 DEBUG [master/ccf62758a0a5:0:becomeActiveMaster-HFileCleaner.small.0-1732617188571 {}] cleaner.HFileCleaner(306): Exit Thread[master/ccf62758a0a5:0:becomeActiveMaster-HFileCleaner.small.0-1732617188571,5,FailOnTimeoutGroup] 2024-11-26T10:36:18,470 DEBUG [M:0;ccf62758a0a5:41385 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-26T10:36:18,470 INFO [M:0;ccf62758a0a5:41385 {}] hbase.ChoreService(370): Chore service for: master/ccf62758a0a5:0 had [] on shutdown 2024-11-26T10:36:18,471 DEBUG [M:0;ccf62758a0a5:41385 {}] master.HMaster(1733): Stopping service threads 2024-11-26T10:36:18,471 INFO [M:0;ccf62758a0a5:41385 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-26T10:36:18,471 ERROR [M:0;ccf62758a0a5:41385 {}] procedure2.ProcedureExecutor(722): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10] Thread[HFileArchiver-5,5,PEWorkerGroup] Thread[IPC Client (59733779) connection to localhost/127.0.0.1:44321 from jenkins,5,PEWorkerGroup] Thread[IPC Parameter Sending Thread for localhost/127.0.0.1:44321,5,PEWorkerGroup] Thread[HFileArchiver-6,5,PEWorkerGroup] 2024-11-26T10:36:18,472 INFO [M:0;ccf62758a0a5:41385 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-26T10:36:18,473 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-26T10:36:18,476 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41385-0x10177fdc7010000, quorum=127.0.0.1:61934, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-26T10:36:18,476 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41385-0x10177fdc7010000, quorum=127.0.0.1:61934, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-26T10:36:18,476 DEBUG [M:0;ccf62758a0a5:41385 {}] zookeeper.ZKUtil(347): master:41385-0x10177fdc7010000, quorum=127.0.0.1:61934, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-26T10:36:18,476 WARN [M:0;ccf62758a0a5:41385 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-26T10:36:18,476 INFO [M:0;ccf62758a0a5:41385 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-11-26T10:36:18,477 INFO [M:0;ccf62758a0a5:41385 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-26T10:36:18,477 DEBUG [M:0;ccf62758a0a5:41385 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-26T10:36:18,477 INFO [M:0;ccf62758a0a5:41385 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-26T10:36:18,477 DEBUG [M:0;ccf62758a0a5:41385 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-26T10:36:18,477 DEBUG [M:0;ccf62758a0a5:41385 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-26T10:36:18,477 DEBUG [M:0;ccf62758a0a5:41385 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-26T10:36:18,477 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41385-0x10177fdc7010000, quorum=127.0.0.1:61934, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-26T10:36:18,477 INFO [M:0;ccf62758a0a5:41385 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=752.91 KB heapSize=924.82 KB 2024-11-26T10:36:18,496 DEBUG [M:0;ccf62758a0a5:41385 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1012e55f0b304088bc3cf32297e5603c is 82, key is hbase:meta,,1/info:regioninfo/1732617189985/Put/seqid=0 2024-11-26T10:36:18,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742413_1589 (size=5672) 2024-11-26T10:36:18,560 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45419-0x10177fdc7010001, quorum=127.0.0.1:61934, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-26T10:36:18,560 INFO [RS:0;ccf62758a0a5:45419 {}] regionserver.HRegionServer(1307): Exiting; stopping=ccf62758a0a5,45419,1732617185877; zookeeper connection closed. 
2024-11-26T10:36:18,560 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45419-0x10177fdc7010001, quorum=127.0.0.1:61934, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-26T10:36:18,560 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@398cf1ca {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@398cf1ca 2024-11-26T10:36:18,560 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-26T10:36:18,901 INFO [M:0;ccf62758a0a5:41385 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=2120 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1012e55f0b304088bc3cf32297e5603c 2024-11-26T10:36:18,929 DEBUG [M:0;ccf62758a0a5:41385 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5f0de5d5fd044246b82c0fa573c7fe33 is 2283, key is \x00\x00\x00\x00\x00\x00\x00,/proc:d/1732617230437/Put/seqid=0 2024-11-26T10:36:18,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742414_1590 (size=43648) 2024-11-26T10:36:19,334 INFO [M:0;ccf62758a0a5:41385 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=752.35 KB at sequenceid=2120 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5f0de5d5fd044246b82c0fa573c7fe33 2024-11-26T10:36:19,342 INFO [M:0;ccf62758a0a5:41385 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 5f0de5d5fd044246b82c0fa573c7fe33 2024-11-26T10:36:19,360 DEBUG [M:0;ccf62758a0a5:41385 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1bfb615768cf48c8be3a1c3a931d9915 is 69, key is ccf62758a0a5,45419,1732617185877/rs:state/1732617188631/Put/seqid=0 2024-11-26T10:36:19,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073742415_1591 (size=5156) 2024-11-26T10:36:19,765 INFO [M:0;ccf62758a0a5:41385 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=2120 (bloomFilter=true), to=hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1bfb615768cf48c8be3a1c3a931d9915 2024-11-26T10:36:19,775 DEBUG [M:0;ccf62758a0a5:41385 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1012e55f0b304088bc3cf32297e5603c as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1012e55f0b304088bc3cf32297e5603c 2024-11-26T10:36:19,778 INFO [M:0;ccf62758a0a5:41385 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1012e55f0b304088bc3cf32297e5603c, entries=8, sequenceid=2120, filesize=5.5 K 2024-11-26T10:36:19,779 DEBUG [M:0;ccf62758a0a5:41385 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5f0de5d5fd044246b82c0fa573c7fe33 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5f0de5d5fd044246b82c0fa573c7fe33 2024-11-26T10:36:19,782 INFO [M:0;ccf62758a0a5:41385 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 5f0de5d5fd044246b82c0fa573c7fe33 2024-11-26T10:36:19,782 INFO [M:0;ccf62758a0a5:41385 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5f0de5d5fd044246b82c0fa573c7fe33, entries=171, sequenceid=2120, filesize=42.6 K 2024-11-26T10:36:19,783 DEBUG [M:0;ccf62758a0a5:41385 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1bfb615768cf48c8be3a1c3a931d9915 as hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1bfb615768cf48c8be3a1c3a931d9915 2024-11-26T10:36:19,786 INFO [M:0;ccf62758a0a5:41385 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44321/user/jenkins/test-data/9c34dcc3-c02b-511b-bdd6-5dbb6d85a3c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1bfb615768cf48c8be3a1c3a931d9915, entries=1, sequenceid=2120, filesize=5.0 K 2024-11-26T10:36:19,787 INFO [M:0;ccf62758a0a5:41385 {}] regionserver.HRegion(3040): Finished flush of dataSize ~752.91 KB/770979, heapSize ~924.52 KB/946712, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1310ms, sequenceid=2120, compaction requested=false 2024-11-26T10:36:19,788 INFO [M:0;ccf62758a0a5:41385 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-26T10:36:19,789 DEBUG [M:0;ccf62758a0a5:41385 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-26T10:36:19,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41261 is added to blk_1073741830_1006 (size=909986) 2024-11-26T10:36:19,791 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-11-26T10:36:19,791 INFO [M:0;ccf62758a0a5:41385 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 
2024-11-26T10:36:19,791 INFO [M:0;ccf62758a0a5:41385 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:41385 2024-11-26T10:36:19,832 DEBUG [M:0;ccf62758a0a5:41385 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/ccf62758a0a5,41385,1732617185123 already deleted, retry=false 2024-11-26T10:36:19,941 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41385-0x10177fdc7010000, quorum=127.0.0.1:61934, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-26T10:36:19,941 INFO [M:0;ccf62758a0a5:41385 {}] regionserver.HRegionServer(1307): Exiting; stopping=ccf62758a0a5,41385,1732617185123; zookeeper connection closed. 2024-11-26T10:36:19,941 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41385-0x10177fdc7010000, quorum=127.0.0.1:61934, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-26T10:36:19,950 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1f79ec76{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-26T10:36:19,952 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@576ebda6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-26T10:36:19,952 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-26T10:36:19,953 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4727fac8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-26T10:36:19,953 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47db50b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7a76f559-d5d5-e8b7-800c-5c4f29b03122/hadoop.log.dir/,STOPPED} 2024-11-26T10:36:19,956 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-26T10:36:19,956 WARN [BP-936542670-172.17.0.2-1732617181920 heartbeating to localhost/127.0.0.1:44321 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-26T10:36:19,956 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-26T10:36:19,956 WARN [BP-936542670-172.17.0.2-1732617181920 heartbeating to localhost/127.0.0.1:44321 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-936542670-172.17.0.2-1732617181920 (Datanode Uuid 6493beed-2905-4521-b551-7832b5ab41ae) service to localhost/127.0.0.1:44321 2024-11-26T10:36:19,958 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7a76f559-d5d5-e8b7-800c-5c4f29b03122/cluster_8e76cc6b-74d5-cb1e-040e-96e811924b73/dfs/data/data1/current/BP-936542670-172.17.0.2-1732617181920 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-26T10:36:19,958 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7a76f559-d5d5-e8b7-800c-5c4f29b03122/cluster_8e76cc6b-74d5-cb1e-040e-96e811924b73/dfs/data/data2/current/BP-936542670-172.17.0.2-1732617181920 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-26T10:36:19,959 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-26T10:36:19,967 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@b03fcff{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-26T10:36:19,968 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-26T10:36:19,968 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-26T10:36:19,968 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-26T10:36:19,968 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7a76f559-d5d5-e8b7-800c-5c4f29b03122/hadoop.log.dir/,STOPPED} 2024-11-26T10:36:19,982 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-11-26T10:36:20,105 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down